// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_drr.c		Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct drr_class {
	struct Qdisc_class_common	common;

	struct gnet_stats_basic_sync	bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu	*rate_est;
	struct list_head		alist;		/* entry in drr_sched::active */
	struct Qdisc			*qdisc;		/* per-class leaf qdisc */

	u32				quantum;	/* bytes added to deficit each round */
	u32				deficit;	/* bytes left to send this round */
};

struct drr_sched {
	struct list_head		active;		/* classes with packets queued */
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
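/*
 * drr_change_class() below handles both class creation and modification.
 * The only DRR-specific attribute is TCA_DRR_QUANTUM, the number of bytes
 * a class may dequeue per round; when omitted it defaults to the device
 * MTU via psched_mtu().  A minimal, illustrative userspace configuration
 * (device name and filter match are placeholders, not part of this file):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		u32 match ip dport 80 0xffff flowid 1:1
 */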
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL, true,
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid,
				      NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL, true, tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (qdisc_class_in_use(&cl->common)) {
		NL_SET_ERR_MSG(extack, "DRR class is in use");
		return -EBUSY;
	}

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)drr_find_class(sch, classid);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl)
		qdisc_class_get(&cl->common);

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	qdisc_class_put(&cl->common);
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}
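/*
 * Classification order: if skb->priority directly names one of our classes
 * (its major number matches this qdisc's handle), use that class without
 * consulting filters; otherwise fall back to the attached tcf filters.  On
 * filter verdicts that consume the packet (QUEUED/STOLEN/TRAP/SHOT) NULL
 * is returned and *qerr tells the caller how to account for the drop.
 */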
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}
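/*
 * Core deficit round robin: the head-of-line packet of the first active
 * class is sent if it fits within the class's remaining deficit, and the
 * packet length is subtracted from that deficit.  Otherwise the class has
 * its deficit increased by one quantum and is rotated to the tail of the
 * active list.  A class that drains its queue leaves the active list and
 * keeps no deficit credit: drr_enqueue() resets deficit to quantum when
 * the class becomes active again.
 */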
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
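MODULE_DESCRIPTION("Deficit Round Robin scheduler");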