/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32 data[256];
        struct qdisc_rate_table *next;
        int refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
        __QDISC_STATE_MISSED,
        __QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
        /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
         * Use qdisc_run_begin/end() or qdisc_is_running() instead.
         */
        __QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED      BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING    BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY   (QDISC_STATE_MISSED | \
                                 QDISC_STATE_DRAINING)

struct qdisc_size_table {
        struct rcu_head rcu;
        struct list_head list;
        struct tc_sizespec szopts;
        int refcnt;
        u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
        struct sk_buff *head;
        struct sk_buff *tail;
        __u32 qlen;
        spinlock_t lock;
};

struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *sch);
        unsigned int            flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue: it can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeuing the next packet.
                                      * True for MQ/MQPRIO slaves, or a
                                      * non-multiqueue device.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
#define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT          0x40 /* root of its hierarchy:
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
#define TCQ_F_INVISIBLE         0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK            0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED         0x200 /* qdisc is offloaded to HW */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct hlist_node       hash;
        u32                     handle;
        u32                     parent;

        struct netdev_queue     *dev_queue;

        struct net_rate_estimator __rcu *rate_est;
        struct gnet_stats_basic_sync __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        int                     pad;
        refcount_t              refcnt;

        /*
         * For performance's sake on SMP, we put the most frequently
         * modified fields at the end.
         */
        struct sk_buff_head     gso_skb ____cacheline_aligned_in_smp;
        struct qdisc_skb_head   q;
        struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
        unsigned long           state;
        unsigned long           state2; /* must be written under qdisc spinlock */
        struct Qdisc            *next_sched;
        struct sk_buff_head     skb_bad_txq;

        spinlock_t              busylock ____cacheline_aligned_in_smp;
        spinlock_t              seqlock;

        struct rcu_head         rcu;
        netdevice_tracker       dev_tracker;
        /* private data */
        long                    privdata[] ____cacheline_aligned;
};
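
/* Example (illustrative sketch, not kernel code): how a caller typically
 * drives the enqueue hook. ->enqueue() never frees skbs directly while the
 * qdisc lock is held; dropped packets are chained onto @to_free and the
 * caller releases them after unlocking (see __qdisc_drop() below).
 *
 *      struct sk_buff *to_free = NULL;
 *      int rc;
 *
 *      rc = q->enqueue(skb, q, &to_free);
 *      if (unlikely(to_free))
 *              kfree_skb_list(to_free);
 *      if (rc != NET_XMIT_SUCCESS)
 *              ...
 */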

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return;
        refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return qdisc;
        if (refcount_inc_not_zero(&qdisc->refcnt))
                return qdisc;
        return NULL;
}

/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK)
                return spin_is_locked(&qdisc->seqlock);
        return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
        return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
        if (qdisc_is_percpu_stats(qdisc))
                return nolock_qdisc_is_empty(qdisc);
        return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK) {
                if (spin_trylock(&qdisc->seqlock))
                        return true;

                /* Paired with smp_mb__after_atomic() to make sure
                 * STATE_MISSED checking is synchronized with clearing
                 * in pfifo_fast_dequeue().
                 */
                smp_mb__before_atomic();

                /* If the MISSED flag is set, it means another thread has
                 * set the MISSED flag before the second spin_trylock(), so
                 * we can return false here to avoid multiple CPUs doing
                 * the set_bit() and second spin_trylock() concurrently.
                 */
                if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
                        return false;

                /* Set the MISSED flag before the second spin_trylock();
                 * if the second spin_trylock() returns false, it means
                 * the CPU holding the lock will do the dequeuing for us,
                 * or it will see the MISSED flag set after releasing the
                 * lock and reschedule the net_tx_action() to do the
                 * dequeuing.
                 */
                set_bit(__QDISC_STATE_MISSED, &qdisc->state);

                /* spin_trylock() only has load-acquire semantics, so use
                 * smp_mb__after_atomic() to ensure STATE_MISSED is set
                 * before doing the second spin_trylock().
                 */
                smp_mb__after_atomic();

                /* Retry in case the other CPU did not see the new flag
                 * before it released the lock at the end of
                 * qdisc_run_end().
                 */
                return spin_trylock(&qdisc->seqlock);
        }
        return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_NOLOCK) {
                spin_unlock(&qdisc->seqlock);

                if (unlikely(test_bit(__QDISC_STATE_MISSED,
                                      &qdisc->state)))
                        __netif_schedule(qdisc);
        } else {
                __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
        }
}
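
/* Example (illustrative sketch): the pattern used by the transmit path.
 * Exactly one CPU wins qdisc_run_begin() and dequeues packets; everyone
 * else either backs off (lock-free qdiscs set __QDISC_STATE_MISSED so the
 * winner reschedules the qdisc) or spins on the root lock.
 * some_dequeue_and_xmit() is a hypothetical helper, not a kernel API.
 *
 *      if (qdisc_run_begin(q)) {
 *              while (some_dequeue_and_xmit(q))
 *                      ;
 *              qdisc_run_end(q);
 *      }
 */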

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
        return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
        /* Drivers not migrated to BQL will also return 0. */
        return dql_avail(&txq->dql);
#else
        return 0;
#endif
}
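
/* Example (illustrative sketch): deciding how many extra bytes may be
 * bulk-dequeued toward a single txq. A negative dql_avail() result means
 * BQL considers the queue full, so no bulking happens.
 *
 *      if (qdisc_may_bulk(q)) {
 *              int budget = qdisc_avail_bulklimit(txq);        // bytes
 *
 *              while (budget > 0 && (nskb = q->dequeue(q))) {
 *                      budget -= qdisc_pkt_len(nskb);
 *                      // append nskb to the xmit batch
 *              }
 *      }
 */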

struct Qdisc_class_ops {
        unsigned int            flags;
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                         struct Qdisc *, struct Qdisc **,
                                         struct netlink_ext_ack *extack);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*find)(struct Qdisc *, u32 classid);
        int                     (*change)(struct Qdisc *, u32, u32,
                                          struct nlattr **, unsigned long *,
                                          struct netlink_ext_ack *);
        int                     (*delete)(struct Qdisc *, unsigned long,
                                          struct netlink_ext_ack *);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_block *      (*tcf_block)(struct Qdisc *sch,
                                             unsigned long arg,
                                             struct netlink_ext_ack *extack);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                            u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg *);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                              struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
        QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops    *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;
        unsigned int            static_flags;

        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);

        int                     (*init)(struct Qdisc *sch, struct nlattr *arg,
                                        struct netlink_ext_ack *extack);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *sch,
                                          struct nlattr *arg,
                                          struct netlink_ext_ack *extack);
        void                    (*attach)(struct Qdisc *sch);
        int                     (*change_tx_queue_len)(struct Qdisc *, unsigned int);
        void                    (*change_real_num_tx)(struct Qdisc *sch,
                                                      unsigned int new_real_tx);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        void                    (*ingress_block_set)(struct Qdisc *sch,
                                                     u32 block_index);
        void                    (*egress_block_set)(struct Qdisc *sch,
                                                    u32 block_index);
        u32                     (*ingress_block_get)(struct Qdisc *sch);
        u32                     (*egress_block_get)(struct Qdisc *sch);

        struct module           *owner;
};
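
/* Example (illustrative sketch, hypothetical "example" qdisc): a minimal
 * Qdisc_ops wiring. register_qdisc() is declared in <net/pkt_sched.h>;
 * example_enqueue/example_dequeue/example_init and struct
 * example_sched_data are assumptions for the sketch, while qdisc_peek_head
 * and qdisc_reset_queue are helpers from this header.
 *
 *      static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *              .id             = "example",
 *              .priv_size      = sizeof(struct example_sched_data),
 *              .enqueue        = example_enqueue,
 *              .dequeue        = example_dequeue,
 *              .peek           = qdisc_peek_head,
 *              .init           = example_init,
 *              .reset          = qdisc_reset_queue,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      register_qdisc(&example_qdisc_ops);
 */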

struct tcf_result {
        union {
                struct {
                        unsigned long   class;
                        u32             classid;
                };
                const struct tcf_proto *goto_tp;

                /* used in the skb_tc_reinsert function */
                struct {
                        bool            ingress;
                        struct gnet_stats_queue *qstats;
                };
        };
};

struct tcf_chain;

struct tcf_proto_ops {
        struct list_head        head;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto*);
        void                    (*destroy)(struct tcf_proto *tp, bool rtnl_held,
                                           struct netlink_ext_ack *extack);

        void*                   (*get)(struct tcf_proto*, u32 handle);
        void                    (*put)(struct tcf_proto *tp, void *f);
        int                     (*change)(struct net *net, struct sk_buff *,
                                          struct tcf_proto*, unsigned long,
                                          u32 handle, struct nlattr **,
                                          void **, u32,
                                          struct netlink_ext_ack *);
        int                     (*delete)(struct tcf_proto *tp, void *arg,
                                          bool *last, bool rtnl_held,
                                          struct netlink_ext_ack *);
        bool                    (*delete_empty)(struct tcf_proto *tp);
        void                    (*walk)(struct tcf_proto *tp,
                                        struct tcf_walker *arg, bool rtnl_held);
        int                     (*reoffload)(struct tcf_proto *tp, bool add,
                                             flow_setup_cb_t *cb, void *cb_priv,
                                             struct netlink_ext_ack *extack);
        void                    (*hw_add)(struct tcf_proto *tp,
                                          void *type_data);
        void                    (*hw_del)(struct tcf_proto *tp,
                                          void *type_data);
        void                    (*bind_class)(void *, u32, unsigned long,
                                              void *, unsigned long);
        void *                  (*tmplt_create)(struct net *net,
                                                struct tcf_chain *chain,
                                                struct nlattr **tca,
                                                struct netlink_ext_ack *extack);
        void                    (*tmplt_destroy)(void *tmplt_priv);

        /* rtnetlink specific */
        int                     (*dump)(struct net*, struct tcf_proto*, void *,
                                        struct sk_buff *skb, struct tcmsg*,
                                        bool);
        int                     (*terse_dump)(struct net *net,
                                              struct tcf_proto *tp, void *fh,
                                              struct sk_buff *skb,
                                              struct tcmsg *t, bool rtnl_held);
        int                     (*tmplt_dump)(struct sk_buff *skb,
                                              struct net *net,
                                              void *tmplt_priv);

        struct module           *owner;
        int                     flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
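
/* Example (illustrative sketch, hypothetical classifier): the ->classify()
 * convention. On a match the classifier fills @res and returns a TC_ACT_*
 * code; a negative return means "no match, try the next tcf_proto".
 * example_head and example_match() are assumptions for the sketch.
 *
 *      static int example_classify(struct sk_buff *skb,
 *                                  const struct tcf_proto *tp,
 *                                  struct tcf_result *res)
 *      {
 *              struct example_head *head = rcu_dereference_bh(tp->root);
 *
 *              if (!example_match(head, skb))
 *                      return -1;              // fall through to next tp
 *              res->classid = head->classid;
 *              res->class = 0;
 *              return TC_ACT_OK;
 *      }
 */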

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto __rcu  *next;
        void __rcu              *root;

        /* called under RCU BH lock */
        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        void                    *data;
        const struct tcf_proto_ops      *ops;
        struct tcf_chain        *chain;
        /* Lock protects tcf_proto shared state and can be used by unlocked
         * classifiers to protect their private data.
         */
        spinlock_t              lock;
        bool                    deleting;
        refcount_t              refcnt;
        struct rcu_head         rcu;
        struct hlist_node       destroy_ht_node;
};

struct qdisc_skb_cb {
        struct {
                unsigned int            pkt_len;
                u16                     slave_dev_queue_mapping;
                u16                     tc_classid;
        };
#define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
        /* Protects filter_chain. */
        struct mutex filter_chain_lock;
        struct tcf_proto __rcu *filter_chain;
        struct list_head list;
        struct tcf_block *block;
        u32 index; /* chain index */
        unsigned int refcnt;
        unsigned int action_refcnt;
        bool explicitly_created;
        bool flushing;
        const struct tcf_proto_ops *tmplt_ops;
        void *tmplt_priv;
        struct rcu_head rcu;
};

struct tcf_block {
        /* Lock protects tcf_block and lifetime-management data of chains
         * attached to the block (refcnt, action_refcnt, explicitly_created).
         */
        struct mutex lock;
        struct list_head chain_list;
        u32 index; /* block index for shared blocks */
        u32 classid; /* which class this block belongs to */
        refcount_t refcnt;
        struct net *net;
        struct Qdisc *q;
        struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
        struct flow_block flow_block;
        struct list_head owner_list;
        bool keep_dst;
        atomic_t offloadcnt; /* Number of offloaded filters */
        unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
        unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
        struct {
                struct tcf_chain *chain;
                struct list_head filter_chain_list;
        } chain0;
        struct rcu_head rcu;
        DECLARE_HASHTABLE(proto_destroy_ht, 7);
        struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
        return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
        return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)                                 \
        rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)                                    \
        rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
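
/* Example (illustrative sketch): walking a chain's filter list. Writers
 * hold chain->filter_chain_lock, which is exactly what
 * tcf_chain_dereference() asserts via lockdep.
 *
 *      mutex_lock(&chain->filter_chain_lock);
 *      for (tp = tcf_chain_dereference(chain->filter_chain, chain);
 *           tp;
 *           tp = tcf_chain_dereference(tp->next, chain))
 *              ...;
 *      mutex_unlock(&chain->filter_chain_lock);
 */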

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
        return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
        __u32 qlen = q->qstats.qlen;
        int i;

        if (qdisc_is_percpu_stats(q)) {
                for_each_possible_cpu(i)
                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
        } else {
                qlen += q->q.qlen;
        }

        return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

        return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
        return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
        if (q->flags & TCQ_F_MQROOT)
                spin_lock_bh(qdisc_lock(q));
        else
                spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
        if (q->flags & TCQ_F_MQROOT)
                spin_unlock_bh(qdisc_lock(q));
        else
                spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
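
/* Example (illustrative sketch): a qdisc's ->change() typically applies
 * new parameters under sch_tree_lock() so the dequeue path never sees a
 * half-updated configuration. q->limit here stands for any hypothetical
 * private field.
 *
 *      sch_tree_lock(sch);
 *      q->limit = new_limit;
 *      sch_tree_unlock(sch);
 */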

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
        return ntx < dev->real_num_tx_queues ?
                        default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
        u32                     classid;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        if (!id)
                return NULL;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}
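
/* Example (illustrative sketch, hypothetical classful qdisc): classes
 * embed a Qdisc_class_common, are looked up by classid, and are recovered
 * with container_of().
 *
 *      struct example_class {
 *              struct Qdisc_class_common common;
 *              ...
 *      };
 *
 *      static struct example_class *
 *      example_find(struct Qdisc_class_hash *clhash, u32 classid)
 *      {
 *              struct Qdisc_class_common *clc;
 *
 *              clc = qdisc_class_find(clhash, classid);
 *              return clc ? container_of(clc, struct example_class, common)
 *                         : NULL;
 *      }
 */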

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
        u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

        return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
                                  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
                              void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                                struct Qdisc *new, struct Qdisc *old,
                                enum tc_setup_type type, void *type_data,
                                struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
                          void *type_data)
{
        q->flags &= ~TCQ_F_OFFLOADED;
        return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                           struct Qdisc *new, struct Qdisc *old,
                           enum tc_setup_type type, void *type_data,
                           struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops,
                          struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid,
                                struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        return skb->tc_at_ingress;
#else
        return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_skip_classify) {
                skb->tc_skip_classify = 0;
                return true;
        }
#endif
        return false;
}

/* Reset all TX qdiscs of a device at and above a given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        rcu_read_lock();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);

                if (!qdisc_is_empty(q)) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch, to_free);
}
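
/* Example (illustrative sketch): interpreting qdisc_enqueue() return codes
 * in a parent qdisc. __NET_XMIT_STOLEN means a child/action consumed the
 * skb, so the parent must not count it as a drop (see
 * net_xmit_drop_count()).
 *
 *      ret = qdisc_enqueue(skb, child, to_free);
 *      if (ret != NET_XMIT_SUCCESS) {
 *              if (net_xmit_drop_count(ret))
 *                      qdisc_qstats_drop(sch);
 *              return ret;
 *      }
 */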

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
                                  __u64 bytes, __u32 packets)
{
        u64_stats_update_begin(&bstats->syncp);
        u64_stats_add(&bstats->bytes, bytes);
        u64_stats_add(&bstats->packets, packets);
        u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
                                 const struct sk_buff *skb)
{
        _bstats_update(bstats,
                       qdisc_pkt_len(skb),
                       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
{
        bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
        this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
        sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
        qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
        qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
        qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
        sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
        __u32 qlen = qdisc_qlen_sum(sch);

        return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
                                             __u32 *backlog)
{
        struct gnet_stats_queue qstats = { 0 };

        gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
        *qlen = qstats.qlen + qdisc_qlen(sch);
        *backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
        __u32 qlen, backlog;

        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
        qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
        __u32 qlen, backlog;

        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
        qdisc_reset(sch);
        qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
        qh->head = NULL;
        qh->tail = NULL;
        qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
                                        struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        __qdisc_enqueue_tail(skb, &sch->q);
        qdisc_qstats_backlog_inc(sch, skb);
        return NET_XMIT_SUCCESS;
}
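
/* Example (illustrative sketch): the head/tail helpers above are enough to
 * build a bare FIFO (compare pfifo in net/sched/sch_fifo.c). qdisc_drop()
 * is defined further down in this header.
 *
 *      static int fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *                              struct sk_buff **to_free)
 *      {
 *              if (likely(sch->q.qlen < sch->limit))
 *                      return qdisc_enqueue_tail(skb, sch);
 *              return qdisc_drop(skb, sch, to_free);
 *      }
 */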

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
                                        struct qdisc_skb_head *qh)
{
        skb->next = qh->head;

        if (!qh->head)
                qh->tail = skb;
        qh->head = skb;
        qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
        struct sk_buff *skb = qh->head;

        if (likely(skb != NULL)) {
                qh->head = skb->next;
                qh->qlen--;
                if (qh->head == NULL)
                        qh->tail = NULL;
                skb->next = NULL;
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        if (likely(skb != NULL)) {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
        skb->next = *to_free;
        *to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
                                    struct sk_buff **to_free)
{
        if (skb->prev)
                skb->prev->next = *to_free;
        else
                skb->next = *to_free;
        *to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct qdisc_skb_head *qh,
                                                   struct sk_buff **to_free)
{
        struct sk_buff *skb = __qdisc_dequeue_head(qh);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                qdisc_qstats_backlog_dec(sch, skb);
                __qdisc_drop(skb, to_free);
                return len;
        }

        return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        const struct qdisc_skb_head *qh = &sch->q;

        return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdiscs */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!skb) {
                skb = sch->dequeue(sch);

                if (skb) {
                        __skb_queue_head(&sch->gso_skb, skb);
                        /* it's still part of the queue */
                        qdisc_qstats_backlog_inc(sch, skb);
                        sch->q.qlen++;
                }
        }

        return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
                                                 struct sk_buff *skb)
{
        if (qdisc_is_percpu_stats(sch)) {
                qdisc_qstats_cpu_backlog_dec(sch, skb);
                qdisc_bstats_cpu_update(sch, skb);
                qdisc_qstats_cpu_qlen_dec(sch);
        } else {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
                sch->q.qlen--;
        }
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
                                                 unsigned int pkt_len)
{
        if (qdisc_is_percpu_stats(sch)) {
                qdisc_qstats_cpu_qlen_inc(sch);
                this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
        } else {
                sch->qstats.backlog += pkt_len;
                sch->q.qlen++;
        }
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        if (skb) {
                skb = __skb_dequeue(&sch->gso_skb);
                if (qdisc_is_percpu_stats(sch)) {
                        qdisc_qstats_cpu_backlog_dec(sch, skb);
                        qdisc_qstats_cpu_qlen_dec(sch);
                } else {
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                }
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
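
/* Example (illustrative sketch): a non-work-conserving qdisc (e.g. a
 * shaper) peeks to decide *when* the head packet may go out, then commits
 * with qdisc_dequeue_peeked() so the gso_skb bookkeeping stays balanced.
 * time_to_send() is a hypothetical check, not a kernel API.
 *
 *      skb = qdisc_peek_dequeued(sch);
 *      if (!skb)
 *              return NULL;
 *      if (time_to_send(skb) > now)
 *              return NULL;            // leave it parked in gso_skb
 *      return qdisc_dequeue_peeked(sch);
 */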

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
        /* We do not know the backlog in bytes of this list; it
         * is up to the caller to correct it.
         */
        ASSERT_RTNL();
        if (qh->qlen) {
                rtnl_kfree_skbs(qh->head, qh->tail);

                qh->head = NULL;
                qh->tail = NULL;
                qh->qlen = 0;
        }
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(&sch->q);
        sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
                                          struct Qdisc **pold)
{
        struct Qdisc *old;

        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
        if (old != NULL)
                qdisc_purge_queue(old);
        sch_tree_unlock(sch);

        return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        rtnl_kfree_skbs(skb, skb);
        qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_cpu_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        __qdisc_drop_all(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

/* Length-to-Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}
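
/* Worked example (assuming cell_log = 3, cell_align = 0, overhead = 0):
 * a 1000-byte packet maps to slot 1000 >> 3 = 125, so the transmit time
 * is rtab->data[125] ticks. Slots above 255 are extrapolated from
 * data[255] plus a lookup of the remainder, as in the code above.
 */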

struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
        u8      linklayer;
        u8      shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));

        /* legacy struct tc_ratespec has a 32bit @rate field;
         * qdiscs using a 64bit rate should add new attributes
         * in order to maintain compatibility.
         */
        res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
        u64     rate_pkts_ps; /* packets per second */
        u32     mult;
        u8      shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
                                  unsigned int pkt_num)
{
        return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
        struct tcf_proto *filter_list;
        struct tcf_block *block;
        struct gnet_stats_basic_sync __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
                                                const struct sk_buff *skb)
{
        bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
        this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
        struct mini_Qdisc miniq1;
        struct mini_Qdisc miniq2;
        struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
                          struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                          struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
                                struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

#endif