#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or for a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;
	void			*u32_node;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For the sake of performance on SMP, we put highly modified fields
	 * at the end.
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff		*skb_bad_txq;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

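/* Illustrative sketch (not part of this header's API; the helper name is
 * hypothetical): how a dequeue loop can combine qdisc_may_bulk() and
 * qdisc_avail_bulklimit() to decide whether pulling another skb for the
 * same txq is worthwhile. Compare try_bulk_dequeue_skb() in
 * net/sched/sch_generic.c.
 */
static inline bool example_can_bulk_dequeue(const struct Qdisc *q)
{
	/* Bulking is only safe when every skb maps to q->dev_queue ... */
	if (!qdisc_may_bulk(q))
		return false;
	/* ... and only useful while BQL still has budget on that queue. */
	return qdisc_avail_bulklimit(q->dev_queue) > 0;
}
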
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	bool			(*destroy)(struct tcf_proto*, bool);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto*, unsigned long,
					  u32 handle, struct nlattr **,
					  unsigned long *, bool);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

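/* Illustrative sketch (hypothetical struct and helper, mirroring what qdiscs
 * such as netem do): per-packet private state is overlaid on the data[] area
 * of qdisc_skb_cb, and qdisc_cb_private_validate() turns an oversized overlay
 * into a build failure rather than silent skb->cb corruption.
 */
struct example_skb_cb {
	u32 my_state;		/* hypothetical per-packet scratch */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)((struct qdisc_skb_cb *)skb->cb)->data;
}
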
static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

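/* Illustrative sketch (hypothetical ->change() fragment): configuration
 * updates take the sleeping root's lock via sch_tree_lock(), which blocks
 * the enqueue/dequeue hot paths for the whole tree while fields are
 * rewritten. RTNL is already held by the netlink caller, as the lock
 * accessors above require.
 */
static inline void example_change_limit(struct Qdisc *sch, u32 new_limit)
{
	sch_tree_lock(sch);
	sch->limit = new_limit;	/* safe: the datapath is excluded */
	sch_tree_unlock(sch);
}
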
struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device greater than the given index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

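/* Illustrative sketch for the class hash helpers above (struct and helper
 * are hypothetical): a classful qdisc embeds Qdisc_class_common at the
 * start of its per-class state, hashes it into a Qdisc_class_hash, and
 * resolves classids in its ->get() via qdisc_class_find() + container_of().
 */
struct example_class {
	struct Qdisc_class_common common;	/* must be embedded for hashing */
	u32 quantum;				/* hypothetical per-class knob */
};

static inline struct example_class *
example_find_class(struct Qdisc_class_hash *clhash, u32 classid)
{
	struct Qdisc_class_common *clc = qdisc_class_find(clhash, classid);

	return clc ? container_of(clc, struct example_class, common) : NULL;
}
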
/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

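/* Illustrative sketch (hypothetical helper): the usual pattern for a
 * classful parent handing a packet to a child qdisc. qdisc_enqueue()
 * applies the size table first; on failure, net_xmit_drop_count() keeps
 * __NET_XMIT_STOLEN results out of the parent's drop counter, since the
 * child kept (rather than dropped) the skb. Backlog accounting is elided.
 */
static inline int example_enqueue_child(struct sk_buff *skb, struct Qdisc *sch,
					struct Qdisc *child,
					struct sk_buff **to_free)
{
	int ret = qdisc_enqueue(skb, child, to_free);

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
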
static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

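/* Illustrative sketch of the caller side of the to_free contract (compare
 * __dev_xmit_skb() in net/core/dev.c; this helper is hypothetical): skbs
 * that an enqueue handler rejects are chained onto to_free via
 * __qdisc_drop() and only freed here, after the root lock is released.
 */
static inline int example_enqueue_locked(struct sk_buff *skb, struct Qdisc *q,
					 spinlock_t *root_lock)
{
	struct sk_buff *to_free = NULL;
	int ret;

	spin_lock(root_lock);
	ret = q->enqueue(skb, q, &to_free);
	spin_unlock(root_lock);

	if (unlikely(to_free))
		kfree_skb_list(to_free);	/* never under the lock */
	return ret;
}
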
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}


static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

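/* Worked example for psched_l2t_ns() (hypothetical helper): at a rate of
 * 12,500,000 bytes/sec (100 Mbit/s), a 1500-byte packet needs
 * 1500 / 12,500,000 s = 120 us = 120,000 ns. The mult/shift pair encodes
 * that division as a multiply-plus-shift, precomputed by
 * psched_ratecfg_precompute() below.
 */
static inline u64 example_skb_tx_time_ns(const struct psched_ratecfg *r,
					 const struct sk_buff *skb)
{
	return psched_l2t_ns(r, qdisc_pkt_len(skb));
}
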
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif