#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_THROTTLED,
};

/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

struct Qdisc {
	int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *(*dequeue)(struct Qdisc *dev);
	unsigned int flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * True for MQ/MQPRIO slaves, or for a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
	u32 limit;
	const struct Qdisc_ops *ops;
	struct qdisc_size_table __rcu *stab;
	struct list_head list;
	u32 handle;
	u32 parent;
	int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q);

	void *u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc *__parent;
	struct netdev_queue *dev_queue;

	struct gnet_stats_rate_est64 rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;

	struct Qdisc *next_sched;
	struct sk_buff *gso_skb;
	/*
	 * For performance's sake on SMP, we put highly modified fields at the end.
	 */
	unsigned long state;
	struct sk_buff_head q;
	struct gnet_stats_basic_packed bstats;
	unsigned int __state;
	struct gnet_stats_queue qstats;
	struct rcu_head rcu_head;
	int padded;
	atomic_t refcnt;

	spinlock_t busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}
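
/* qdisc_run_begin()/qdisc_run_end() bracket one dequeue run of a qdisc.
 * The read-modify-write of __state is deliberately non-atomic: as noted
 * at qdisc___state_t above, these bits are only changed while the qdisc
 * lock is held, so at most one CPU can own the RUNNING bit at a time.
 * A false return from qdisc_run_begin() means another CPU is already
 * running this qdisc and will dequeue the newly queued work itself.
 */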

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	qdisc->__state |= __QDISC___STATE_RUNNING;
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	qdisc->__state &= ~__QDISC___STATE_RUNNING;
}

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **);
	struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*get)(struct Qdisc *, u32 classid);
	void (*put)(struct Qdisc *, unsigned long);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto __rcu **(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;

	int (*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *(*dequeue)(struct Qdisc *);
	struct sk_buff *(*peek)(struct Qdisc *);
	unsigned int (*drop)(struct Qdisc *);

	int (*init)(struct Qdisc *, struct nlattr *arg);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *, struct nlattr *arg);
	void (*attach)(struct Qdisc *);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module *owner;
};

struct tcf_result {
	unsigned long class;
	u32 classid;
};

struct tcf_proto_ops {
	struct list_head head;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	bool (*destroy)(struct tcf_proto *, bool);

	unsigned long (*get)(struct tcf_proto *, u32 handle);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto *, unsigned long,
		      u32 handle, struct nlattr **,
		      unsigned long *, bool);
	int (*delete)(struct tcf_proto *, unsigned long);
	void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int (*dump)(struct net *, struct tcf_proto *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);

	struct module *owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu *next;
	void __rcu *root;
	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	u32 classid;
	struct Qdisc *q;
	void *data;
	const struct tcf_proto_ops *ops;
	struct rcu_head rcu;
};
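
/* qdisc_skb_cb occupies the first bytes of skb->cb while a packet is
 * owned by the qdisc layer.  pkt_len is the (possibly stab-adjusted)
 * length that the accounting helpers below use instead of skb->len.
 * Qdiscs that keep per-packet private state in data[] should call
 * qdisc_cb_private_validate() so that an oversized private struct
 * fails at build time rather than corrupting the skb.
 */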

struct qdisc_skb_cb {
	unsigned int pkt_len;
	u16 slave_dev_queue_mapping;
	u16 _pad;
#define QDISC_CB_PRIV_LEN 20
	unsigned char data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

struct Qdisc_class_common {
	u32 classid;
	struct hlist_node hnode;
};

struct Qdisc_class_hash {
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};
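
/* Hash a classid into a bucket index.  Classids are major:minor pairs
 * whose distinguishing bits often sit above the low nibble, so the two
 * xor folds below mix the higher bits down before masking.  hashmask is
 * hashsize - 1 (the table is a power of two in size), and
 * qdisc_class_hash_grow() resizes the table as hashelems increases.
 */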

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);

/* Reset all TX qdiscs of a device at or above queue index @i. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};
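
/* With CONFIG_NET_CLS_ACT, a packet may be "stolen" by an action (the
 * skb is consumed, yet the enqueue is not a failure), in which case the
 * return code carries __NET_XMIT_STOLEN and net_xmit_drop_count() tells
 * the parent qdisc not to count it as a drop.  These flags live above
 * NET_XMIT_MASK, so qdisc_enqueue_root() strips them before the return
 * code reaches the core network stack.
 */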

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	struct gnet_stats_basic_cpu *bstats = this_cpu_ptr(sch->cpu_bstats);

	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	sch->qstats.drops++;
}

static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
{
	struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);

	qstats->drops++;
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		qdisc_qstats_backlog_dec(sch, skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_qstats_drop(sch);

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
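
/* A qdisc_rate_table maps packet sizes to transmit times: sizes are
 * quantized into 256 cells of 2^cell_log bytes, and data[cell] holds the
 * precomputed time (filled in by tc from userspace) to send a packet of
 * that quantized size.  Illustrative example: with cell_log = 3 and no
 * align/overhead, a 100 byte packet lands in cell 100 >> 3 = 12.  Sizes
 * past the end of the table are extrapolated from the largest cells in
 * qdisc_l2t() below.
 */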

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
					    int action)
{
	struct sk_buff *n;

	n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

struct psched_ratecfg {
	u64 rate_bytes_ps; /* bytes per second */
	u32 mult;
	u16 overhead;
	u8 linklayer;
	u8 shift;
};
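
/* psched_ratecfg_precompute() turns rate_bytes_ps into the (mult, shift)
 * pair used below, so the hot path can compute
 *
 *	time_ns = len * mult >> shift	(~ len * NSEC_PER_SEC / rate)
 *
 * with one multiplication and one shift instead of a 64-bit division
 * per packet.  For TC_LINKLAYER_ATM the length is first rounded up to
 * whole 53-byte cells carrying 48 bytes of payload each.
 */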

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* The legacy struct tc_ratespec has a 32bit @rate field;
	 * qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif