#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *sch);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	unsigned long		state;
	struct sk_buff		*gso_skb;
	struct sk_buff_head	q;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;
	struct list_head	list;

	struct gnet_stats_basic		bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	struct rcu_head		q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
						struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

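/*
 * Illustrative sketch, not part of the original header: the scheduler
 * core resolves a packet to a class by walking a filter chain and
 * calling each tcf_proto's classify() hook until one returns a
 * non-negative verdict and fills in the tcf_result.  Roughly, in the
 * spirit of tc_classify_compat():
 *
 *	for (; tp; tp = tp->next) {
 *		if (tp->protocol != skb->protocol &&
 *		    tp->protocol != htons(ETH_P_ALL))
 *			continue;
 *		err = tp->classify(skb, tp, res);
 *		if (err >= 0)
 *			return err;
 *	}
 *	return -1;	// no filter matched
 */
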
struct qdisc_skb_cb {
	unsigned int	pkt_len;
	char		data[];
};

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

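/*
 * Illustrative sketch, not part of the original header: a classful
 * qdisc typically embeds a struct Qdisc_class_common in its per-class
 * state and resolves a classid via qdisc_class_find() plus
 * container_of().  struct example_class and example_find() are
 * hypothetical names:
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		// ... per-class scheduling state ...
 *	};
 *
 *	static struct example_class *example_find(struct Qdisc_class_hash *h,
 *						  u32 classid)
 *	{
 *		struct Qdisc_class_common *clc = qdisc_class_find(h, classid);
 *
 *		return clc ? container_of(clc, struct example_class, common)
 *			   : NULL;
 *	}
 */
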
extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device. */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

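/*
 * Illustrative sketch, not part of the original header: a minimal
 * tail-drop FIFO enqueue in the spirit of sch_fifo, built from the
 * helpers above.  struct example_fifo_data and its limit field are
 * hypothetical, qdisc_priv() is the per-qdisc private data accessor,
 * and qdisc_reshape_fail() is defined further below:
 *
 *	static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		struct example_fifo_data *q = qdisc_priv(sch);
 *
 *		if (likely(skb_queue_len(&sch->q) < q->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_reshape_fail(skb, sch);
 *	}
 *
 * The matching dequeue side is simply qdisc_dequeue_head() (below).
 */
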
static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size: data[slot] holds
 * the precomputed transmission time for sizes falling into that
 * 2^cell_log-byte slot.  Sizes beyond the last slot are approximated
 * by charging data[255] once per 256 slots, plus the entry for the
 * remaining slot.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}

#ifdef CONFIG_NET_CLS_ACT
/* Clone an skb for use by packet actions, clearing the clone's
 * tc_verd munging state.
 */
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif /* CONFIG_NET_CLS_ACT */

#endif /* __NET_SCHED_GENERIC_H */