/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff *head;
	struct sk_buff *tail;
	__u32 qlen;
	spinlock_t lock;
};

struct Qdisc {
	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	unsigned int flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeuing the next packet.
				      * It is true for MQ/MQPRIO slaves, or for
				      * a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_reduce_backlog() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32 limit;
	const struct Qdisc_ops *ops;
	struct qdisc_size_table __rcu *stab;
	struct hlist_node hash;
	u32 handle;
	u32 parent;

	struct netdev_queue *dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put frequently modified fields
	 * at the end.
	 */
	struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t running;
	struct gnet_stats_queue qstats;
	unsigned long state;
	struct Qdisc *next_sched;
	struct sk_buff_head skb_bad_txq;
	int padded;
	refcount_t refcnt;

	spinlock_t busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
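/* qdisc_run_begin()/qdisc_run_end() bracket one run of the qdisc on the
 * current CPU.  A sketch of the caller-side pairing, modeled on
 * qdisc_run() in include/net/pkt_sched.h:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);		// dequeue/transmit loop, declared elsewhere
 *		qdisc_run_end(q);
 *	}
 *
 * If qdisc_run_begin() returns false, another CPU already owns the run
 * and will see the newly enqueued packets.
 */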
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **,
		     struct netlink_ext_ack *extack);
	struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*find)(struct Qdisc *, u32 classid);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *,
		      struct netlink_ext_ack *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *(*tcf_block)(struct Qdisc *sch,
				       unsigned long arg,
				       struct netlink_ext_ack *extack);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;
	unsigned int static_flags;

	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *);
	struct sk_buff *(*peek)(struct Qdisc *);

	int (*init)(struct Qdisc *sch, struct nlattr *arg,
		    struct netlink_ext_ack *extack);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *sch,
		      struct nlattr *arg,
		      struct netlink_ext_ack *extack);
	void (*attach)(struct Qdisc *sch);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module *owner;
};
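/* A minimal Qdisc_ops sketch for orientation; "toyfifo" and its enqueue
 * callback are hypothetical names, loosely modeled on net/sched/sch_fifo.c.
 * It reuses generic helpers defined further down in this header:
 *
 *	static int toyfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 *	static struct Qdisc_ops toyfifo_qdisc_ops __read_mostly = {
 *		.id		= "toyfifo",
 *		.enqueue	= toyfifo_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * register_qdisc(&toyfifo_qdisc_ops) would then make it selectable via
 * "tc qdisc add ... toyfifo".
 */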
struct tcf_result {
	union {
		struct {
			unsigned long class;
			u32 classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head head;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	void (*destroy)(struct tcf_proto *);

	void *(*get)(struct tcf_proto *, u32 handle);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto *, unsigned long,
		      u32 handle, struct nlattr **,
		      void **, bool);
	int (*delete)(struct tcf_proto *, void *, bool *);
	void (*walk)(struct tcf_proto *, struct tcf_walker *arg);
	void (*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct net *, struct tcf_proto *, void *,
		    struct sk_buff *skb, struct tcmsg *);

	struct module *owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu *next;
	void __rcu *root;
	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	u32 classid;
	struct Qdisc *q;
	void *data;
	const struct tcf_proto_ops *ops;
	struct tcf_chain *chain;
	struct rcu_head rcu;
};

struct qdisc_skb_cb {
	unsigned int pkt_len;
	u16 slave_dev_queue_mapping;
	u16 tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	struct list_head filter_chain_list;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	unsigned int refcnt;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
	struct list_head owner_list;
	bool keep_dst;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
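/* Qdiscs may keep per-packet private state in the data[] tail of
 * qdisc_skb_cb.  A sketch of the usual accessor pattern, modeled on
 * net/sched/sch_netem.c; "my_skb_cb" is a hypothetical layout:
 *
 *	struct my_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		// compile-time check that data[] is large enough
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */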
static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = 0;
	int i;

	if (q->flags & TCQ_F_NOLOCK) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen = q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32 classid;
	struct hlist_node hnode;
};

struct Qdisc_class_hash {
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}
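/* A classful qdisc typically embeds Qdisc_class_common in its own class
 * struct and resolves classids through qdisc_class_find().  Sketch modeled
 * on net/sched/sch_htb.c; "my_class"/"my_sched" are hypothetical types and
 * qdisc_priv() is the standard private-area accessor declared elsewhere:
 *
 *	struct my_class {
 *		struct Qdisc_class_common common;
 *		// ... scheduler-specific state ...
 *	};
 *
 *	static struct my_class *my_find(u32 handle, struct Qdisc *sch)
 *	{
 *		struct my_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, handle);
 *		if (clc == NULL)
 *			return NULL;
 *		return container_of(clc, struct my_class, common);
 *	}
 */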
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, starting at queue index i. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}
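/* qdisc_create_dflt() and dev_graft_qdisc() are what multiqueue root
 * qdiscs use to set up one child per TX queue.  A condensed sketch modeled
 * on mq_init()/mq_attach() in net/sched/sch_mq.c (error unwinding omitted;
 * sch_mq actually splits this between its init and attach callbacks):
 *
 *	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
 *		dev_queue = netdev_get_tx_queue(dev, ntx);
 *		qdisc = qdisc_create_dflt(dev_queue,
 *					  get_default_qdisc_ops(dev, ntx),
 *					  TC_H_MAKE(TC_H_MAJ(sch->handle),
 *						    TC_H_MIN(ntx + 1)),
 *					  extack);
 *		if (!qdisc)
 *			return -ENOMEM;
 *		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 *		old = dev_graft_qdisc(dev_queue, qdisc);
 *		if (old)
 *			qdisc_destroy(old);
 *	}
 */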
/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}
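/* Which family of stats helpers applies depends on TCQ_F_CPUSTATS.  A
 * sketch contrasting the two on a dequeue path; the percpu branch follows
 * pfifo_fast_dequeue() in net/sched/sch_generic.c:
 *
 *	if (qdisc_is_percpu_stats(q)) {
 *		qdisc_qstats_cpu_backlog_dec(q, skb);
 *		qdisc_bstats_cpu_update(q, skb);
 *		qdisc_qstats_cpu_qlen_dec(q);
 *	} else {
 *		qdisc_qstats_backlog_dec(q, skb);
 *		qdisc_bstats_update(q, skb);
 *		q->q.qlen--;
 *	}
 */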
static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}
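/* On the caller side, the to_free list collected via __qdisc_drop() is
 * freed only once the qdisc lock has been released.  A sketch of the
 * pattern in __dev_xmit_skb() (net/core/dev.c):
 *
 *	struct sk_buff *to_free = NULL;
 *
 *	spin_lock(root_lock);
 *	rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 *	spin_unlock(root_lock);
 *	if (unlikely(to_free))
 *		kfree_skb_list(to_free);
 */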
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64 rate_bytes_ps; /* bytes per second */
	u32 mult;
	u16 overhead;
	u8 linklayer;
	u8 shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
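/* Worked example (illustrative numbers): psched_ratecfg_precompute()
 * picks mult/shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps without a 64-bit division on the
 * fast path.  At rate_bytes_ps = 125,000,000 (1 Gbit/s) that is 8 ns per
 * byte, so a 1500-byte packet (plus overhead) costs about 12,000 ns.
 * On ATM link layers the length is first rounded up to whole 53-byte
 * cells carrying 48 payload bytes each.
 */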
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);

#endif