#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

/*
 * The following bits are only changed while the qdisc lock is held
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *sch);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_MQROOT		16
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance on SMP, the most frequently modified fields
	 * are placed at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	unsigned long		__state;
	struct gnet_stats_queue	qstats;
	struct rcu_head		rcu_head;
	spinlock_t		busylock;
};

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	__clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}
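/*
 * Usage sketch (illustrative only; example_run() is hypothetical): the
 * transmit path serializes qdisc execution with qdisc_run_begin() and
 * qdisc_run_end(), roughly as the stack's qdisc_run() does:
 *
 *	static void example_run(struct Qdisc *q)
 *	{
 *		if (qdisc_run_begin(q)) {
 *			... dequeue and transmit packets ...
 *			qdisc_run_end(q);
 *		}
 *	}
 *
 * Only one CPU can win qdisc_run_begin(); the others return at once and
 * rely on the winner to drain the queue.  The non-atomic bit operations
 * are sufficient because __state is only changed with the qdisc lock held.
 */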
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	long			data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
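/*
 * Usage sketch (example_skb_cb and example_cb() are hypothetical names,
 * modeled on the pattern in net/sched/sch_netem.c): a qdisc that needs
 * per-packet private state overlays its own control block on the data[]
 * area behind qdisc_skb_cb:
 *
 *	struct example_skb_cb {
 *		psched_time_t	time_to_send;
 *	};
 *
 *	static inline struct example_skb_cb *example_cb(struct sk_buff *skb)
 *	{
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 *
 * The combined layout must still fit inside skb->cb[].
 */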
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
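/*
 * Usage sketch: configuration paths, typically a qdisc's ->change()
 * handler running with RTNL held, take the tree lock around updates
 * that race with the datapath:
 *
 *	sch_tree_lock(sch);
 *	... update class parameters, rehash tables, etc. ...
 *	sch_tree_unlock(sch);
 *
 * This only excludes the packet processing paths; serialization against
 * other configuration requests comes from RTNL itself, which
 * qdisc_root_sleeping_lock() asserts.
 */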
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device with a queue index greater than
 * or equal to i.
 */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
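/*
 * Usage sketch (a hypothetical ->enqueue() body): a classful qdisc
 * enqueueing into a child must not count packets stolen by actions as
 * drops, which is what net_xmit_drop_count() distinguishes:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 */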
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		sch->qstats.backlog -= len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
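/*
 * Usage sketch (tokens_available() is hypothetical): a shaping qdisc
 * peeks first and only commits the dequeue once the packet is allowed
 * out, so a packet that must wait stays parked in ->gso_skb:
 *
 *	skb = qdisc_peek_dequeued(sch);
 *	if (skb == NULL)
 *		return NULL;
 *	if (!tokens_available(sch, qdisc_pkt_len(skb)))
 *		return NULL;
 *	return qdisc_dequeue_peeked(sch);
 */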
static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.  For example, with
 * cell_log = 3 and no alignment or overhead, a 100-byte packet maps to
 * slot 100 >> 3 = 12, so the transmit time is read from rtab->data[12];
 * slots above 255 are approximated from the top table entries.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
					    int action)
{
	struct sk_buff *n;

	n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif