#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * This is true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_reduce_backlog() should stop.
				      */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;
	void			*u32_node;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For the sake of performance on SMP, we put the most frequently
	 * modified fields at the end.
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff		*skb_bad_txq;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Drivers not migrated to BQL will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
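
/* A minimal usage sketch, not part of the kernel API: bulk dequeue is
 * only worthwhile when every skb maps to the same txq (TCQ_F_ONETXQUEUE)
 * and BQL still reports budget. The helper name is hypothetical.
 */
static inline bool example_can_bulk_dequeue(const struct Qdisc *q)
{
	return qdisc_may_bulk(q) &&
	       qdisc_avail_bulklimit(q->dev_queue) > 0;
}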
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops *cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	bool			(*destroy)(struct tcf_proto *, bool);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  unsigned long *, bool);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops *ops;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};
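
/* A minimal sketch with hypothetical names: a qdisc can overlay private
 * per-skb state on the data[] area above by embedding qdisc_skb_cb as
 * the first member, then checking the fit at build time with
 * qdisc_cb_private_validate() (defined below).
 */
struct example_skb_cb {
	struct qdisc_skb_cb	base;		/* must be first */
	u64			enqueue_time;	/* example private field */
};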
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
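
/* Usage sketch (hypothetical helper): configuration updates take the
 * sleeping root lock so the data path cannot run concurrently. Callers
 * must already hold RTNL.
 */
static inline void example_set_limit(struct Qdisc *sch, u32 limit)
{
	sch_tree_lock(sch);	/* blocks out packet processing */
	sch->limit = limit;
	sch_tree_unlock(sch);
}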
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
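
/* Sketch (hypothetical types): classful qdiscs typically embed
 * Qdisc_class_common in their own class struct and recover it from a
 * qdisc_class_find() result with container_of().
 */
struct example_class {
	struct Qdisc_class_common common;	/* hashed by classid */
	/* ... per-class state would follow ... */
};

static inline struct example_class *
example_class_lookup(struct Qdisc_class_hash *hash, u32 classid)
{
	struct Qdisc_class_common *c = qdisc_class_find(hash, classid);

	return c ? container_of(c, struct example_class, common) : NULL;
}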
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, from a given queue index upwards. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
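
/* Usage sketch (hypothetical caller): the to_free list gathers skbs the
 * qdisc dropped while its lock was held; the caller frees them in one
 * batch after unlocking, which is roughly what __dev_xmit_skb() does.
 */
static inline int example_enqueue(struct sk_buff *skb, struct Qdisc *q)
{
	struct sk_buff *to_free = NULL;
	int ret;

	spin_lock(qdisc_lock(q));
	ret = qdisc_enqueue(skb, q, &to_free);
	spin_unlock(qdisc_lock(q));
	if (unlikely(to_free))
		kfree_skb_list(to_free);	/* free outside the lock */
	return ret;
}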
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}
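
/* Sketch (hypothetical, similar in spirit to pfifo): a bare-bones
 * tail-drop FIFO ->enqueue built from the helpers above.
 */
static inline int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	/* over limit: defer the free and account the drop */
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);
	return NET_XMIT_DROP;
}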
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdiscs */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}


static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
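
/* Usage sketch (hypothetical): a non-work-conserving qdisc peeks first,
 * e.g. to compare a timestamp against the clock, and only commits the
 * dequeue with qdisc_dequeue_peeked() once the packet may be released.
 */
static inline struct sk_buff *example_dequeue_if_ready(struct Qdisc *sch,
						       bool ready)
{
	struct sk_buff *skb = qdisc_peek_dequeued(sch);

	if (!skb || !ready)
		return NULL;	/* skb stays accounted for in the queue */

	return qdisc_dequeue_peeked(sch);	/* returns the peeked skb */
}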
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* The legacy struct tc_ratespec has a 32bit @rate field;
	 * qdiscs using 64bit rates should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif