/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32 data[256];
        struct qdisc_rate_table *next;
        int refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
        struct rcu_head rcu;
        struct list_head list;
        struct tc_sizespec szopts;
        int refcnt;
        u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
        struct sk_buff *head;
        struct sk_buff *tail;
        __u32 qlen;
        spinlock_t lock;
};

struct Qdisc {
        int (*enqueue)(struct sk_buff *skb,
                       struct Qdisc *sch,
                       struct sk_buff **to_free);
        struct sk_buff *(*dequeue)(struct Qdisc *sch);
        unsigned int flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue : It can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeueing next packet.
                                      * It's true for MQ/MQPRIO slaves, or
                                      * non-multiqueue devices.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
#define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT          0x40 /* root of its hierarchy :
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
#define TCQ_F_INVISIBLE         0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK            0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED         0x200 /* qdisc is offloaded to HW */
        u32 limit;
        const struct Qdisc_ops *ops;
        struct qdisc_size_table __rcu *stab;
        struct hlist_node hash;
        u32 handle;
        u32 parent;

        struct netdev_queue *dev_queue;

        struct net_rate_estimator __rcu *rate_est;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        int padded;
        refcount_t refcnt;

        /*
         * For performance's sake on SMP, we put frequently modified fields
         * at the end.
         */
        struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
        struct qdisc_skb_head q;
        struct gnet_stats_basic_packed bstats;
        seqcount_t running;
        struct gnet_stats_queue qstats;
        unsigned long state;
        struct Qdisc *next_sched;
        struct sk_buff_head skb_bad_txq;

        spinlock_t busylock ____cacheline_aligned_in_smp;
        spinlock_t seqlock;

        /* for NOLOCK qdisc, true if there are no enqueued skbs */
        bool empty;
        struct rcu_head rcu;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return;
        refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */
static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return qdisc;
        if (refcount_inc_not_zero(&qdisc->refcnt))
                return qdisc;
        return NULL;
}

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK)
                return spin_is_locked(&qdisc->seqlock);
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
        if (qdisc_is_percpu_stats(qdisc))
                return READ_ONCE(qdisc->empty);
        return !READ_ONCE(qdisc->q.qlen);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK) {
                if (!spin_trylock(&qdisc->seqlock))
                        return false;
                WRITE_ONCE(qdisc->empty, false);
        } else if (qdisc_is_running(qdisc)) {
                return false;
        }
        /* Variant of write_seqcount_begin() telling lockdep a trylock
         * was attempted.
         */
        raw_write_seqcount_begin(&qdisc->running);
        seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        write_seqcount_end(&qdisc->running);
        if (qdisc->flags & TCQ_F_NOLOCK)
                spin_unlock(&qdisc->seqlock);
}

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
        return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
        /* Non-BQL migrated drivers will return 0, too. */
        return dql_avail(&txq->dql);
#else
        return 0;
#endif
}
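
/* A rough sketch (not the core's exact logic) of how the two helpers above
 * cooperate on the transmit path: bulk dequeue is only attempted for
 * TCQ_F_ONETXQUEUE qdiscs, and the BQL budget bounds how many bytes may be
 * pulled out in one batch.  Here q, txq and skb merely stand for the qdisc,
 * its netdev_queue and the dequeued packet:
 *
 *      if (qdisc_may_bulk(q)) {
 *              int budget = qdisc_avail_bulklimit(txq);
 *
 *              while (budget > 0 && (skb = q->dequeue(q)) != NULL)
 *                      budget -= qdisc_pkt_len(skb);
 *      }
 */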

struct Qdisc_class_ops {
        unsigned int flags;
        /* Child qdisc manipulation */
        struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
        int (*graft)(struct Qdisc *, unsigned long cl,
                     struct Qdisc *, struct Qdisc **,
                     struct netlink_ext_ack *extack);
        struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
        void (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long (*find)(struct Qdisc *, u32 classid);
        int (*change)(struct Qdisc *, u32, u32,
                      struct nlattr **, unsigned long *,
                      struct netlink_ext_ack *);
        int (*delete)(struct Qdisc *, unsigned long);
        void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_block *(*tcf_block)(struct Qdisc *sch,
                                       unsigned long arg,
                                       struct netlink_ext_ack *extack);
        unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
                                  u32 classid);
        void (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int (*dump)(struct Qdisc *, unsigned long,
                    struct sk_buff *skb, struct tcmsg *);
        int (*dump_stats)(struct Qdisc *, unsigned long,
                          struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
        QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
        struct Qdisc_ops *next;
        const struct Qdisc_class_ops *cl_ops;
        char id[IFNAMSIZ];
        int priv_size;
        unsigned int static_flags;

        int (*enqueue)(struct sk_buff *skb,
                       struct Qdisc *sch,
                       struct sk_buff **to_free);
        struct sk_buff *(*dequeue)(struct Qdisc *);
        struct sk_buff *(*peek)(struct Qdisc *);

        int (*init)(struct Qdisc *sch, struct nlattr *arg,
                    struct netlink_ext_ack *extack);
        void (*reset)(struct Qdisc *);
        void (*destroy)(struct Qdisc *);
        int (*change)(struct Qdisc *sch,
                      struct nlattr *arg,
                      struct netlink_ext_ack *extack);
        void (*attach)(struct Qdisc *sch);
        int (*change_tx_queue_len)(struct Qdisc *, unsigned int);

        int (*dump)(struct Qdisc *, struct sk_buff *);
        int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        void (*ingress_block_set)(struct Qdisc *sch,
                                  u32 block_index);
        void (*egress_block_set)(struct Qdisc *sch,
                                 u32 block_index);
        u32 (*ingress_block_get)(struct Qdisc *sch);
        u32 (*egress_block_get)(struct Qdisc *sch);

        struct module *owner;
};


struct tcf_result {
        union {
                struct {
                        unsigned long class;
                        u32 classid;
                };
                const struct tcf_proto *goto_tp;

                /* used in the skb_tc_reinsert function */
                struct {
                        bool ingress;
                        struct gnet_stats_queue *qstats;
                };
        };
};

struct tcf_chain;

struct tcf_proto_ops {
        struct list_head head;
        char kind[IFNAMSIZ];

        int (*classify)(struct sk_buff *,
                        const struct tcf_proto *,
                        struct tcf_result *);
        int (*init)(struct tcf_proto *);
        void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
                        struct netlink_ext_ack *extack);

        void *(*get)(struct tcf_proto *, u32 handle);
        void (*put)(struct tcf_proto *tp, void *f);
        int (*change)(struct net *net, struct sk_buff *,
                      struct tcf_proto *, unsigned long,
                      u32 handle, struct nlattr **,
                      void **, bool, bool,
                      struct netlink_ext_ack *);
        int (*delete)(struct tcf_proto *tp, void *arg,
                      bool *last, bool rtnl_held,
                      struct netlink_ext_ack *);
        bool (*delete_empty)(struct tcf_proto *tp);
        void (*walk)(struct tcf_proto *tp,
                     struct tcf_walker *arg, bool rtnl_held);
        int (*reoffload)(struct tcf_proto *tp, bool add,
                         flow_setup_cb_t *cb, void *cb_priv,
                         struct netlink_ext_ack *extack);
        void (*hw_add)(struct tcf_proto *tp,
                       void *type_data);
        void (*hw_del)(struct tcf_proto *tp,
                       void *type_data);
        void (*bind_class)(void *, u32, unsigned long);
        void *(*tmplt_create)(struct net *net,
                              struct tcf_chain *chain,
                              struct nlattr **tca,
                              struct netlink_ext_ack *extack);
        void (*tmplt_destroy)(void *tmplt_priv);

        /* rtnetlink specific */
        int (*dump)(struct net *, struct tcf_proto *, void *,
                    struct sk_buff *skb, struct tcmsg *,
                    bool);
        int (*tmplt_dump)(struct sk_buff *skb,
                          struct net *net,
                          void *tmplt_priv);

        struct module *owner;
        int flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto __rcu *next;
        void __rcu *root;

        /* called under RCU BH lock */
        int (*classify)(struct sk_buff *,
                        const struct tcf_proto *,
                        struct tcf_result *);
        __be16 protocol;

        /* All the rest */
        u32 prio;
        void *data;
        const struct tcf_proto_ops *ops;
        struct tcf_chain *chain;
        /* Lock protects tcf_proto shared state and can be used by unlocked
         * classifiers to protect their private data.
         */
        spinlock_t lock;
        bool deleting;
        refcount_t refcnt;
        struct rcu_head rcu;
        struct hlist_node destroy_ht_node;
};

struct qdisc_skb_cb {
        struct {
                unsigned int pkt_len;
                u16 slave_dev_queue_mapping;
                u16 tc_classid;
        };
#define QDISC_CB_PRIV_LEN 20
        unsigned char data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
        /* Protects filter_chain. */
        struct mutex filter_chain_lock;
        struct tcf_proto __rcu *filter_chain;
        struct list_head list;
        struct tcf_block *block;
        u32 index; /* chain index */
        unsigned int refcnt;
        unsigned int action_refcnt;
        bool explicitly_created;
        bool flushing;
        const struct tcf_proto_ops *tmplt_ops;
        void *tmplt_priv;
        struct rcu_head rcu;
};

struct tcf_block {
        /* Lock protects tcf_block and lifetime-management data of chains
         * attached to the block (refcnt, action_refcnt, explicitly_created).
         */
        struct mutex lock;
        struct list_head chain_list;
        u32 index; /* block index for shared blocks */
        refcount_t refcnt;
        struct net *net;
        struct Qdisc *q;
        struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
        struct flow_block flow_block;
        struct list_head owner_list;
        bool keep_dst;
        atomic_t offloadcnt; /* Number of offloaded filters */
        unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
        unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
        struct {
                struct tcf_chain *chain;
                struct list_head filter_chain_list;
        } chain0;
        struct rcu_head rcu;
        DECLARE_HASHTABLE(proto_destroy_ht, 7);
        struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

#ifdef CONFIG_PROVE_LOCKING
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
        return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
        return lockdep_is_held(&tp->lock);
}
#else
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
        return true;
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
        return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */

#define tcf_chain_dereference(p, chain) \
        rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
        rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
        return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
        __u32 qlen = q->qstats.qlen;
        int i;

        if (qdisc_is_percpu_stats(q)) {
                for_each_possible_cpu(i)
                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
        } else {
                qlen += q->q.qlen;
        }

        return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

        return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
        return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
        return ntx < dev->real_num_tx_queues ?
                        default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
        u32 classid;
        struct hlist_node hnode;
};

struct Qdisc_class_hash {
        struct hlist_head *hash;
        unsigned int hashsize;
        unsigned int hashmask;
        unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        if (!id)
                return NULL;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
        u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

        return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
                              void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                                struct Qdisc *new, struct Qdisc *old,
                                enum tc_setup_type type, void *type_data,
                                struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
                          void *type_data)
{
        q->flags &= ~TCQ_F_OFFLOADED;
        return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                           struct Qdisc *new, struct Qdisc *old,
                           enum tc_setup_type type, void *type_data,
                           struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops,
                          struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid,
                                struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_redirected = 0;
#endif
}

static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        return skb->tc_redirected;
#else
        return false;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        return skb->tc_at_ingress;
#else
        return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_skip_classify) {
                skb->tc_skip_classify = 0;
                return true;
        }
#endif
        return false;
}

/* Reset all TX qdiscs of a device, starting from the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        rcu_read_lock();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);

                if (!qdisc_is_empty(q)) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                  __u64 bytes, __u32 packets)
{
        bstats->bytes += bytes;
        bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        _bstats_update(bstats,
                       qdisc_pkt_len(skb),
                       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}
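
/* Note that a GSO skb is counted above as skb_shinfo(skb)->gso_segs packets,
 * so the packet counter reflects the number of segments that will eventually
 * reach the wire rather than the number of enqueued skbs.
 */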

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                      __u64 bytes, __u32 packets)
{
        u64_stats_update_begin(&bstats->syncp);
        _bstats_update(&bstats->bstats, bytes, packets);
        u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                     const struct sk_buff *skb)
{
        u64_stats_update_begin(&bstats->syncp);
        bstats_update(&bstats->bstats, skb);
        u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
{
        bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
        this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
        sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
        qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
        qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
        qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
        sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
        __u32 qlen = qdisc_qlen_sum(sch);

        return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
                                             __u32 *backlog)
{
        struct gnet_stats_queue qstats = { 0 };
        __u32 len = qdisc_qlen_sum(sch);

        __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
        *qlen = qstats.qlen;
        *backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
        __u32 qlen, backlog;

        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
        qdisc_tree_reduce_backlog(sch, qlen, backlog);
}
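
/* Unlike qdisc_tree_flush_backlog() above, which only propagates the current
 * qlen/backlog up the tree, qdisc_purge_queue() below also resets the qdisc
 * itself and therefore drops its queued packets.
 */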

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
        __u32 qlen, backlog;

        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
        qdisc_reset(sch);
        qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
        qh->head = NULL;
        qh->tail = NULL;
        qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
                                        struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        __qdisc_enqueue_tail(skb, &sch->q);
        qdisc_qstats_backlog_inc(sch, skb);
        return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
                                        struct qdisc_skb_head *qh)
{
        skb->next = qh->head;

        if (!qh->head)
                qh->tail = skb;
        qh->head = skb;
        qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
        struct sk_buff *skb = qh->head;

        if (likely(skb != NULL)) {
                qh->head = skb->next;
                qh->qlen--;
                if (qh->head == NULL)
                        qh->tail = NULL;
                skb->next = NULL;
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        if (likely(skb != NULL)) {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
        skb->next = *to_free;
        *to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
                                    struct sk_buff **to_free)
{
        if (skb->prev)
                skb->prev->next = *to_free;
        else
                skb->next = *to_free;
        *to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct qdisc_skb_head *qh,
                                                   struct sk_buff **to_free)
{
        struct sk_buff *skb = __qdisc_dequeue_head(qh);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                qdisc_qstats_backlog_dec(sch, skb);
                __qdisc_drop(skb, to_free);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
                                                 struct sk_buff **to_free)
{
        return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        const struct qdisc_skb_head *qh = &sch->q;

        return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!skb) {
                skb = sch->dequeue(sch);

                if (skb) {
                        __skb_queue_head(&sch->gso_skb, skb);
                        /* it's still part of the queue */
                        qdisc_qstats_backlog_inc(sch, skb);
                        sch->q.qlen++;
                }
        }

        return skb;
}
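
/* Illustrative note: a qdisc whose ->peek() is qdisc_peek_dequeued() above is
 * expected to dequeue through qdisc_dequeue_peeked() further below, so that a
 * packet parked in gso_skb by the peek is consumed before ->dequeue() is
 * called again.
 */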

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
                                                 struct sk_buff *skb)
{
        if (qdisc_is_percpu_stats(sch)) {
                qdisc_qstats_cpu_backlog_dec(sch, skb);
                qdisc_bstats_cpu_update(sch, skb);
                qdisc_qstats_cpu_qlen_dec(sch);
        } else {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
                sch->q.qlen--;
        }
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
                                                 unsigned int pkt_len)
{
        if (qdisc_is_percpu_stats(sch)) {
                qdisc_qstats_cpu_qlen_inc(sch);
                this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
        } else {
                sch->qstats.backlog += pkt_len;
                sch->q.qlen++;
        }
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        if (skb) {
                skb = __skb_dequeue(&sch->gso_skb);
                if (qdisc_is_percpu_stats(sch)) {
                        qdisc_qstats_cpu_backlog_dec(sch, skb);
                        qdisc_qstats_cpu_qlen_dec(sch);
                } else {
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                }
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
        /*
         * We do not know the backlog in bytes of this list, it
         * is up to the caller to correct it
         */
        ASSERT_RTNL();
        if (qh->qlen) {
                rtnl_kfree_skbs(qh->head, qh->tail);

                qh->head = NULL;
                qh->tail = NULL;
                qh->qlen = 0;
        }
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(&sch->q);
        sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
                                          struct Qdisc **pold)
{
        struct Qdisc *old;

        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
        if (old != NULL)
                qdisc_tree_flush_backlog(old);
        sch_tree_unlock(sch);

        return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        rtnl_kfree_skbs(skb, skb);
        qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_cpu_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        __qdisc_drop_all(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}

struct psched_ratecfg {
        u64 rate_bytes_ps; /* bytes per second */
        u32 mult;
        u16 overhead;
        u8 linklayer;
        u8 shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));

        /* legacy struct tc_ratespec has a 32bit @rate field;
         * qdiscs using a 64bit rate should add new attributes
         * in order to maintain compatibility.
         */
        res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
        struct tcf_proto *filter_list;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
                                                const struct sk_buff *skb)
{
        bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
        this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
        struct mini_Qdisc miniq1;
        struct mini_Qdisc miniq2;
        struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
                          struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                          struct mini_Qdisc __rcu **p_miniq);

static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
        return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
}

#endif