#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but the skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It is true for MQ/MQPRIO slaves, or for
				      * a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff		*skb_bad_txq;
	int			padded;
	refcount_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
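/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * caller claims exclusive dequeue rights with qdisc_run_begin() and
 * releases them with qdisc_run_end(), mirroring the pattern used by
 * __qdisc_run() in net/sched/sch_generic.c:
 *
 *	if (qdisc_run_begin(q)) {
 *		while ((skb = q->dequeue(q)) != NULL)
 *			;	// hand each skb to the driver
 *		qdisc_run_end(q);
 *	}
 *
 * A false return means another CPU already owns this qdisc's running
 * seqcount, so the work can simply be left to that owner.
 */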
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	void *			(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					void **, bool);
	int			(*delete)(struct tcf_proto *, void *, bool *);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);
	void			(*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};
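/*
 * Example (illustrative sketch): the shape of a minimal classless qdisc.
 * The enqueue wrapper mirrors pfifo_enqueue() in net/sched/sch_fifo.c;
 * register_qdisc() (declared in net/pkt_sched.h) would make it usable
 * from tc:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.enqueue	= example_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 */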
struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
	struct work_struct work;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}
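/*
 * Example (illustrative): how a qdisc keeps private per-skb state in
 * skb->cb while the packet sits in its queue, mirroring the pattern of
 * netem_skb_cb() in net/sched/sch_netem.c:
 *
 *	struct example_skb_cb {
 *		u64 enqueue_time;
 *	};
 *
 *	static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 *
 * The BUILD_BUG_ON()s turn an oversized private struct into a compile
 * time error instead of silent cb corruption.
 */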
/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}
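/*
 * Example (illustrative): a classful qdisc changing class parameters
 * under the tree lock so the datapath never observes a half-updated
 * configuration:
 *
 *	sch_tree_lock(sch);
 *	cl->quantum = new_quantum;	// hypothetical class field
 *	sch_tree_unlock(sch);
 *
 * RTNL must already be held; qdisc_root_sleeping_lock() asserts that.
 */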
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device whose queue index is not less than i. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}
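/*
 * Example (illustrative): how a classful qdisc typically attaches a
 * default child when a new class is created, a pattern used by e.g.
 * net/sched/sch_htb.c (pfifo_qdisc_ops is declared in net/pkt_sched.h):
 *
 *	struct Qdisc *child;
 *
 *	child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 *				  TC_H_MAKE(sch->handle, classid));
 *	if (!child)
 *		return -ENOBUFS;
 */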
/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}
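/*
 * Example (illustrative): the accounting a work-conserving qdisc is
 * expected to do on its fast paths:
 *
 *	on successful dequeue:	qdisc_bstats_update(sch, skb);
 *				qdisc_qstats_backlog_dec(sch, skb);
 *	on drop:		qdisc_qstats_drop(sch);
 *
 * Qdiscs flagged TCQ_F_CPUSTATS use the *_cpu_* variants instead, so
 * counters can be updated without serializing on the qdisc lock.
 */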
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
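/*
 * Example (illustrative): how a shaper pairs the two helpers, roughly
 * as tbf_dequeue() in net/sched/sch_tbf.c does with its child qdisc:
 *
 *	skb = child->ops->peek(child);		// look without consuming
 *	if (skb && tokens_available(skb))	// hypothetical admission test
 *		skb = qdisc_dequeue_peeked(child);  // now really take it
 *	else
 *		skb = NULL;			// leave it queued, arm watchdog
 *
 * qdisc_peek_dequeued() is what such children typically install as
 * their ->peek() operation.
 */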
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* The legacy struct tc_ratespec has a 32-bit @rate field;
	 * a qdisc using a 64-bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
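/*
 * Example (illustrative): converting a packet length into its transmit
 * time, as a shaper does when deciding how long to throttle:
 *
 *	struct psched_ratecfg rate;
 *	u64 tx_ns;
 *
 *	psched_ratecfg_precompute(&rate, &conf, rate64);
 *	tx_ns = psched_l2t_ns(&rate, qdisc_pkt_len(skb));
 *
 * For TC_LINKLAYER_ATM the length is first rounded up to 48-byte cell
 * payloads and expanded to 53-byte cells, modeling ATM framing overhead.
 */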
/* Mini Qdisc serves for the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);

#endif