/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	  When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	  based schemes, and should not be used for isolating interactive
	  traffic from non-interactive traffic. This means that this scheduler
	  should be used as a leaf of CBQ or P3, which put interactive traffic
	  into a higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128,
	the maximal MTU to 2^15-1, and the number of hash buckets to 1024.
	The only goal of these restrictions was that all the data
	fit into one 4K page :-). Struct sfq_sched_data is
	organized in an anti-cache manner: all the data for a bucket
	are scattered over different locations. This is not good,
	but it allowed me to put it into 4K.

	It is easy to increase these values, but not in flight.  */
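
/* Usage sketch (assuming the standard iproute2 "tc" syntax for this qdisc;
 * not part of the kernel logic itself):
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * "perturb 10" re-seeds the hash every 10 seconds via the timer set up in
 * sfq_perturbation() below; "quantum" defaults to the device MTU when it is
 * not given (see sfq_init()).
 */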

#define SFQ_DEPTH		128
#define SFQ_HASH_DIVISOR	1024

/* This type should contain at least SFQ_DEPTH*2 values */
typedef unsigned char sfq_index;

struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct timer_list perturb_timer;
	int		perturbation;
	sfq_index	tail;		/* Index of current slot in round */
	sfq_index	max_depth;	/* Maximal depth */

	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	sfq_index	next[SFQ_DEPTH];	/* Active slots link */
	short		allot[SFQ_DEPTH];	/* Current allotment per slot */
	unsigned short	hash[SFQ_DEPTH];	/* Hash value indexed by slots */
	struct sk_buff_head	qs[SFQ_DEPTH];	/* Slot queue */
	struct sfq_head	dep[SFQ_DEPTH*2];	/* Linked list of slots, indexed by depth */
};

static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	int pert = q->perturbation;

	/* Have we any rotation primitives? If not, WHY? */
	h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
	h ^= h>>10;
	return h & 0x3FF;
}

static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
	{
		struct iphdr *iph = skb->nh.iph;
		h = iph->daddr;
		h2 = iph->saddr^iph->protocol;
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    (iph->protocol == IPPROTO_TCP ||
		     iph->protocol == IPPROTO_UDP ||
		     iph->protocol == IPPROTO_ESP))
			h2 ^= *(((u32*)iph) + iph->ihl);
		break;
	}
	case __constant_htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph = skb->nh.ipv6h;
		h = iph->daddr.s6_addr32[3];
		h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
		if (iph->nexthdr == IPPROTO_TCP ||
		    iph->nexthdr == IPPROTO_UDP ||
		    iph->nexthdr == IPPROTO_ESP)
			h2 ^= *(u32*)&iph[1];
		break;
	}
	default:
		h = (u32)(unsigned long)skb->dst^skb->protocol;
		h2 = (u32)(unsigned long)skb->sk;
	}
	return sfq_fold_hash(q, h, h2);
}

static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d = q->qs[x].qlen + SFQ_DEPTH;

	p = d;
	n = q->dep[d].next;
	q->dep[x].next = n;
	q->dep[x].prev = p;
	q->dep[p].next = q->dep[n].prev = x;
}

static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;

	n = q->dep[x].next;
	p = q->dep[x].prev;
	q->dep[p].next = n;
	q->dep[n].prev = p;

	if (n == p && q->max_depth == q->qs[x].qlen + 1)
		q->max_depth--;

	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	n = q->dep[x].next;
	p = q->dep[x].prev;
	q->dep[p].next = n;
	q->dep[n].prev = p;
	d = q->qs[x].qlen;
	if (q->max_depth < d)
		q->max_depth = d;

	sfq_link(q, x);
}
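
/* Note on dep[]: it holds one circular list per queue depth.  The first
 * SFQ_DEPTH entries are the link nodes of the slots themselves; entries
 * SFQ_DEPTH..2*SFQ_DEPTH-1 are the list heads, so dep[SFQ_DEPTH + n] chains
 * together all slots that currently hold n packets.  For example, an empty
 * slot hangs off dep[SFQ_DEPTH + 0], which is how sfq_enqueue() finds a free
 * slot, and sfq_drop() below follows dep[max_depth + SFQ_DEPTH].next to find
 * a longest slot to drop from.
 */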

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index d = q->max_depth;
	struct sk_buff *skb;
	unsigned int len;

	/* Queue is full! Find the longest slot and
	   drop a packet from it */

	if (d > 1) {
		sfq_index x = q->dep[d+SFQ_DEPTH].next;
		skb = q->qs[x].prev;
		len = skb->len;
		__skb_unlink(skb, &q->qs[x]);
		kfree_skb(skb);
		sfq_dec(q, x);
		sch->q.qlen--;
		sch->qstats.drops++;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		d = q->next[q->tail];
		q->next[q->tail] = q->next[d];
		q->allot[q->next[d]] += q->quantum;
		skb = q->qs[d].prev;
		len = skb->len;
		__skb_unlink(skb, &q->qs[d]);
		kfree_skb(skb);
		sfq_dec(q, d);
		sch->q.qlen--;
		q->ht[q->hash[d]] = SFQ_DEPTH;
		sch->qstats.drops++;
		return len;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	__skb_queue_tail(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen < q->limit-1) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}

static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	__skb_queue_head(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen < q->limit - 1) {
		sch->qstats.requeues++;
		return 0;
	}

	sch->qstats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
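
/* Round-robin service in sfq_dequeue() below works like the deficit round
 * robin of Shreedhar/Varghese cited above: each active slot carries an
 * allot[] credit, a slot keeps sending while its credit stays positive, and
 * a slot's credit is topped up by one quantum each time its turn begins.
 * For example, with quantum = 1514 a slot sending 1514-byte packets
 * transmits roughly one packet per round before the scheduler moves on.
 */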

static struct sk_buff *
sfq_dequeue(struct Qdisc* sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, old_a;

	/* No active slots */
	if (q->tail == SFQ_DEPTH)
		return NULL;

	a = old_a = q->next[q->tail];

	/* Grab packet */
	skb = __skb_dequeue(&q->qs[a]);
	sfq_dec(q, a);
	sch->q.qlen--;

	/* Is the slot empty? */
	if (q->qs[a].qlen == 0) {
		q->ht[q->hash[a]] = SFQ_DEPTH;
		a = q->next[a];
		if (a == old_a) {
			q->tail = SFQ_DEPTH;
			return skb;
		}
		q->next[q->tail] = a;
		q->allot[a] += q->quantum;
	} else if ((q->allot[a] -= skb->len) <= 0) {
		q->tail = a;
		a = q->next[a];
		q->allot[a] += q->quantum;
	}
	return skb;
}

static void
sfq_reset(struct Qdisc* sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random()&0x1F;

	if (q->perturb_period) {
		q->perturb_timer.expires = jiffies + q->perturb_period;
		add_timer(&q->perturb_timer);
	}
}

static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = RTA_DATA(opt);

	if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
	q->perturb_period = ctl->perturb_period*HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH);

	while (sch->q.qlen >= q->limit-1)
		sfq_drop(sch);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		q->perturb_timer.expires = jiffies + q->perturb_period;
		add_timer(&q->perturb_timer);
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	init_timer(&q->perturb_timer);
	q->perturb_timer.data = (unsigned long)sch;
	q->perturb_timer.function = sfq_perturbation;

	for (i=0; i<SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_DEPTH;
	for (i=0; i<SFQ_DEPTH; i++) {
		skb_queue_head_init(&q->qs[i]);
		q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
		q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
	}
	q->limit = SFQ_DEPTH;
	q->max_depth = 0;
	q->tail = SFQ_DEPTH;
	if (opt == NULL) {
		q->quantum = psched_mtu(sch->dev);
		q->perturb_period = 0;
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}
	for (i=0; i<SFQ_DEPTH; i++)
		sfq_link(q, i);
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	del_timer(&q->perturb_timer);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period/HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static struct Qdisc_ops sfq_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	NULL,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.requeue	=	sfq_requeue,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
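
/* The boilerplate below registers the qdisc with the packet scheduler core.
 * Once the module is loaded (typically on demand, or by hand with
 * "modprobe sch_sfq", its usual module name), the "sfq" id declared in
 * sfq_qdisc_ops above becomes available to userspace tools such as tc.
 */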

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}

module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");