#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

struct Qdisc
{
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct sk_buff_head	q;
	struct net_device	*dev;
	struct list_head	list;

	struct gnet_stats_basic		bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	spinlock_t		*stats_lock;
	struct rcu_head		q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
						struct Qdisc *q);

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};
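
/*
 * Illustrative sketch (not compiled): a minimal tail-drop FIFO qdisc can
 * be assembled almost entirely from the generic queue helpers defined
 * further down in this header, then registered with register_qdisc()
 * from <net/pkt_sched.h>. The "example_fifo" names are hypothetical and
 * exist only for exposition; see net/sched/sch_fifo.c for the real
 * in-tree equivalent of this pattern.
 */
#if 0
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Tail-drop once the device's queue length limit is reached. */
	if (likely(skb_queue_len(&sch->q) < sch->dev->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch);
}

static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
	.id		= "example_fifo",
	.priv_size	= 0,
	.enqueue	= example_fifo_enqueue,
	.dequeue	= qdisc_dequeue_head,
	.requeue	= qdisc_requeue,
	.drop		= qdisc_queue_drop,
	.reset		= qdisc_reset_queue,
	.owner		= THIS_MODULE,
};

static int __init example_fifo_init(void)
{
	return register_qdisc(&example_fifo_qdisc_ops);
}
module_init(example_fifo_init);
#endif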
struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

#define sch_tree_lock(q)	qdisc_lock_tree((q)->dev)
#define sch_tree_unlock(q)	qdisc_unlock_tree((q)->dev)
#define tcf_tree_lock(tp)	qdisc_lock_tree((tp)->q->dev)
#define tcf_tree_unlock(tp)	qdisc_unlock_tree((tp)->q->dev)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct Qdisc_ops *ops, u32 parentid);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto *fl);

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}
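
/*
 * Illustrative sketch (not compiled): the helpers above keep
 * qstats.backlog (queued bytes) and bstats (byte/packet counters)
 * consistent, so a byte-limited enqueue only has to make the admission
 * decision. "example_bfifo_enqueue" and the 64 kB limit are hypothetical;
 * qdisc_drop() is defined further down in this header.
 */
#if 0
static int example_bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Admit while the queued byte count stays under the limit;
	 * qdisc_enqueue_tail() updates backlog and bstats for us. */
	if (likely(sch->qstats.backlog + skb->len <= 65536))
		return qdisc_enqueue_tail(skb, sch);

	/* qdisc_drop() frees the skb and bumps qstats.drops. */
	return qdisc_drop(skb, sch);
}
#endif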
static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = skb->len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif
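
/*
 * Illustrative sketch (not compiled): a token-bucket style shaper might
 * use qdisc_l2t() to charge each packet its transmission time in ticks,
 * using a rate table typically obtained with qdisc_get_rtab() from
 * <net/pkt_sched.h>. "example_shaper" and "example_shaper_charge" are
 * hypothetical names; see net/sched/sch_tbf.c for a real user of
 * struct qdisc_rate_table.
 */
#if 0
struct example_shaper {
	struct qdisc_rate_table	*rtab;	/* set up in the qdisc's ->init() */
	u32			tokens;	/* accumulated credit, in ticks */
};

static int example_shaper_charge(struct example_shaper *sh,
				 const struct sk_buff *skb)
{
	u32 cost = qdisc_l2t(sh->rtab, skb->len);

	if (sh->tokens < cost)
		return 0;	/* not enough credit: caller throttles */

	sh->tokens -= cost;
	return 1;
}
#endif

#endif	/* __NET_SCHED_GENERIC_H */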