/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return err;
}

/*
 * Deficit Round Robin dequeue: serve the class at the head of the active
 * list while the head packet fits into its remaining deficit.  When it no
 * longer fits, credit the class with one quantum and rotate it to the tail,
 * so each class receives bandwidth in proportion to its quantum over time.
 */
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
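
/*
 * Illustrative configuration sketch: DRR is set up from userspace with the
 * tc utility.  The device name, handles, class id, quantum and filter match
 * below are placeholder values, not part of this module.
 *
 *   tc qdisc add dev eth0 root handle 1: drr
 *   tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *       match ip dport 80 0xffff flowid 1:1
 *
 * A class created without an explicit quantum defaults to the device MTU
 * (see drr_change_class()); packets that no filter maps to a class are
 * dropped by drr_enqueue().
 */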