/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

/* Issue a scheduling class add/delete command to firmware for the
 * class indexed by @p->u.params.class.
 */
static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
	case SCHED_FW_OP_DEL:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;

		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
				    &fw_param, &fw_class);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		fe = (struct sched_flowc_entry *)arg;

		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}
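/* Illustrative example (not part of the driver logic; the concrete ids
 * below are made up). Binding the TX queue with context id 5 to
 * scheduling class 2 would, in effect, issue:
 *
 *	fw_param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
 *		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
 *		   FW_PARAMS_PARAM_YZ_V(5);
 *	fw_class = 2;	(FW_SCHED_CLS_NONE when unbinding)
 *	t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &fw_param, &fw_class);
 */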
static void *t4_sched_entry_lookup(struct port_info *pi,
				   enum sched_bind_type type,
				   const u32 val)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	void *found = NULL;

	/* Look for an entry with matching @val */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		if (e->state == SCHED_STATE_UNUSED ||
		    e->bind_type != type)
			continue;

		switch (type) {
		case SCHED_QUEUE: {
			struct sched_queue_entry *qe;

			list_for_each_entry(qe, &e->entry_list, list) {
				if (qe->cntxt_id == val) {
					found = qe;
					break;
				}
			}
			break;
		}
		case SCHED_FLOWC: {
			struct sched_flowc_entry *fe;

			list_for_each_entry(fe, &e->entry_list, list) {
				if (fe->param.tid == val) {
					found = fe;
					break;
				}
			}
			break;
		}
		default:
			return NULL;
		}

		if (found)
			break;
	}

	return found;
}

static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

	/* Find the existing entry that the queue is bound to */
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (qe) {
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[qe->param.class];
		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind queue to specified class */
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err)
		goto out_err;

	list_add_tail(&qe->list, &e->entry_list);
	e->bind_type = SCHED_QUEUE;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(qe);
	return err;
}
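/* Lifetime note: a successful bind links the entry onto the class'
 * entry_list and takes a reference on the class; the matching unbind
 * drops the reference and, once the last user is gone, releases the
 * class back to firmware via cxgb4_sched_class_free().
 */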
static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (fe) {
		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[fe->param.class];
		list_del(&fe->list);
		kvfree(fe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Unbind flowc from any existing class */
	err = t4_sched_flowc_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind flowc to specified class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (err)
		goto out_err;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(fe);
	return err;
}

static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe, *qe_next;

		/* Use the _safe iterator: the unbind below removes the
		 * current entry from the list and frees it.
		 */
		list_for_each_entry_safe(qe, qe_next, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe, *fe_next;

		list_for_each_entry_safe(fe, fe_next, &e->entry_list, list)
			t4_sched_flowc_unbind(pi, &fe->param);
		break;
	}
	default:
		break;
	}
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		if (bind)
			err = t4_sched_flowc_bind(pi, fe);
		else
			err = t4_sched_flowc_unbind(pi, fe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}
/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Binds an entity (queue or flowc) to a scheduling class. If the entity
 * is bound to another class, it will be unbound from the other class
 * and bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}

/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Unbinds an entity (queue or flowc) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}
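/* Usage sketch (hedged; values are illustrative and error handling is
 * elided). Binding TX queue 0 of a port to a previously allocated
 * class "e", then unbinding it:
 *
 *	struct ch_sched_queue qe = {
 *		.queue = 0,
 *		.class = e->idx,
 *	};
 *
 *	cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 *	...
 *	qe.class = SCHED_CLS_NONE;
 *	cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
 *
 * On unbind the class id is not used for the lookup; the queue's
 * current binding is found via its context id.
 */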
/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						 const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *found = NULL;
	struct sched_class *e, *end;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_class *e = NULL;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	/* See if there's an existing class with same requested sched
	 * params. Classes can only be shared among FLOWC types. For
	 * other types, always request a new class.
	 */
	if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
		e = t4_sched_class_lookup(pi, p);

	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			return NULL;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err)
			return NULL;
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
	}

	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns pointer to the scheduling class created. If a scheduling
 * class with matching @p is found, then the matching class is returned;
 * otherwise an unused class is allocated and configured with @p.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return NULL;

	/* @p must carry the requested parameters; guard before
	 * dereferencing it (t4_sched_class_alloc() rejects a NULL @p
	 * as well).
	 */
	if (!p)
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}
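/* Usage sketch (hedged; the parameter values below are illustrative).
 * Allocating a class-level rate limiter capped at 100 Mbps, in the
 * style of this driver's TC offload paths:
 *
 *	struct ch_sched_params p = {
 *		.type = SCHED_CLASS_TYPE_PACKET,
 *		.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
 *		.u.params.mode = SCHED_CLASS_MODE_CLASS,
 *		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
 *		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
 *		.u.params.channel = pi->tx_chan,
 *		.u.params.class = SCHED_CLS_NONE,
 *		.u.params.maxrate = 100000,	(Kbps)
 *	};
 *	struct sched_class *e = cxgb4_sched_class_alloc(dev, &p);
 */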
/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct ch_sched_params p;
	struct sched_class *e;
	u32 speed;
	int ret;

	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
		/* Port based rate limiting needs explicit reset back
		 * to max rate. But, we'll do explicit reset for all
		 * types, instead of just port based type, to be on
		 * the safer side.
		 */
		memcpy(&p, &e->info, sizeof(p));
		/* Always reset mode to 0. Otherwise, FLOWC mode will
		 * still be enabled even after resetting the traffic
		 * class.
		 */
		p.u.params.mode = 0;
		p.u.params.minrate = 0;
		p.u.params.pktsize = 0;

		ret = t4_get_link_params(pi, NULL, &speed, NULL);
		if (!ret)
			p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
		else
			p.u.params.maxrate = SCHED_MAX_RATE_KBPS;

		t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);

		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}

static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}

struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].entry_list);
		atomic_set(&s->tab[i].refcnt, 0);
	}
	return s;
}

void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(adap->port[j], e);
		}
		kvfree(s);
	}
}
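/* Typical wiring (sketch; "adap->params.nsched_cls" is the assumed
 * source of the class count). Each port's table is allocated once at
 * probe time and the whole adapter is torn down on removal:
 *
 *	pi->sched_tbl = t4_init_sched(adap->params.nsched_cls);
 *	...
 *	t4_cleanup_sched(adap);
 *
 * t4_init_sched() may return NULL, in which case can_sched() reports
 * false and the exported bind/unbind entry points bail out early.
 */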