/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

/* Spinlock must be held by caller */
static int t4_sched_class_fw_cmd(struct port_info *pi,
                                 struct ch_sched_params *p,
                                 enum sched_fw_ops op)
{
        struct adapter *adap = pi->adapter;
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        int err = 0;

        e = &s->tab[p->u.params.class];
        switch (op) {
        case SCHED_FW_OP_ADD:
                err = t4_sched_params(adap, p->type,
                                      p->u.params.level, p->u.params.mode,
                                      p->u.params.rateunit,
                                      p->u.params.ratemode,
                                      p->u.params.channel, e->idx,
                                      p->u.params.minrate, p->u.params.maxrate,
                                      p->u.params.weight, p->u.params.pktsize);
                break;
        default:
                err = -ENOTSUPP;
                break;
        }

        return err;
}
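/* Illustrative sketch (not part of the driver): a ch_sched_params fill
 * for a class-level rate limit, as a caller of cxgb4_sched_class_alloc()
 * below might build it before the parameters reach
 * t4_sched_class_fw_cmd(). All numeric values are hypothetical; the
 * SCHED_CLASS_* and SCHED_CLS_* constants are the ones from sched.h.
 *
 *	struct ch_sched_params p = {
 *		.type              = SCHED_CLASS_TYPE_PACKET,
 *		.u.params.level    = SCHED_CLASS_LEVEL_CL_RL,
 *		.u.params.mode     = SCHED_CLASS_MODE_CLASS,
 *		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
 *		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
 *		.u.params.channel  = 0,
 *		.u.params.class    = SCHED_CLS_NONE,
 *		.u.params.maxrate  = 100000,
 *	};
 */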
/* Spinlock must be held by caller */
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
                                   enum sched_bind_type type, bool bind)
{
        struct adapter *adap = pi->adapter;
        u32 fw_mnem, fw_class, fw_param;
        unsigned int pf = adap->pf;
        unsigned int vf = 0;
        int err = 0;

        switch (type) {
        case SCHED_QUEUE: {
                struct sched_queue_entry *qe;

                qe = (struct sched_queue_entry *)arg;

                /* Create a template for the FW_PARAMS_CMD mnemonic and
                 * value (TX Scheduling Class in this case).
                 */
                fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                           FW_PARAMS_PARAM_X_V(
                                   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
                fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
                fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

                pf = adap->pf;
                vf = 0;
                break;
        }
        default:
                err = -ENOTSUPP;
                goto out;
        }

        err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);

out:
        return err;
}

/* Look up the scheduling class that a queue (identified by its context
 * id) is currently bound to. If @index is non-NULL, also return the
 * queue's position within the class's queue list.
 */
static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
                                                 const unsigned int qid,
                                                 int *index)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e, *end;
        struct sched_class *found = NULL;
        int i;

        /* Look for a class with matching bound queue parameters */
        end = &s->tab[s->sched_size];
        for (e = &s->tab[0]; e != end; ++e) {
                struct sched_queue_entry *qe;

                i = 0;
                if (e->state == SCHED_STATE_UNUSED)
                        continue;

                list_for_each_entry(qe, &e->queue_list, list) {
                        if (qe->cntxt_id == qid) {
                                found = e;
                                if (index)
                                        *index = i;
                                break;
                        }
                        i++;
                }

                if (found)
                        break;
        }

        return found;
}

/* Unbind a queue from the scheduling class it is currently bound to,
 * if any. When the last queue is unbound, the class reverts to
 * SCHED_STATE_UNUSED.
 */
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
        struct adapter *adap = pi->adapter;
        struct sched_class *e;
        struct sched_queue_entry *qe = NULL;
        struct sge_eth_txq *txq;
        unsigned int qid;
        int index = -1;
        int err = 0;

        if (p->queue < 0 || p->queue >= pi->nqsets)
                return -ERANGE;

        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
        qid = txq->q.cntxt_id;

        /* Find the existing class that the queue is bound to */
        e = t4_sched_queue_lookup(pi, qid, &index);
        if (e && index >= 0) {
                int i = 0;

                spin_lock(&e->lock);
                list_for_each_entry(qe, &e->queue_list, list) {
                        if (i == index)
                                break;
                        i++;
                }
                err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
                                              false);
                if (err) {
                        spin_unlock(&e->lock);
                        goto out;
                }

                list_del(&qe->list);
                kvfree(qe);
                if (atomic_dec_and_test(&e->refcnt)) {
                        e->state = SCHED_STATE_UNUSED;
                        memset(&e->info, 0, sizeof(e->info));
                }
                spin_unlock(&e->lock);
        }
out:
        return err;
}

/* Bind a queue to the scheduling class in @p->class. Any existing
 * binding is dropped first, so a queue is bound to at most one class.
 */
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
        struct adapter *adap = pi->adapter;
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        struct sched_queue_entry *qe = NULL;
        struct sge_eth_txq *txq;
        unsigned int qid;
        int err = 0;

        if (p->queue < 0 || p->queue >= pi->nqsets)
                return -ERANGE;

        qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
        if (!qe)
                return -ENOMEM;

        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
        qid = txq->q.cntxt_id;

        /* Unbind queue from any existing class */
        err = t4_sched_queue_unbind(pi, p);
        if (err) {
                kvfree(qe);
                goto out;
        }

        /* Bind queue to specified class */
        qe->cntxt_id = qid;
        memcpy(&qe->param, p, sizeof(qe->param));

        e = &s->tab[qe->param.class];
        spin_lock(&e->lock);
        err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
        if (err) {
                kvfree(qe);
                spin_unlock(&e->lock);
                goto out;
        }

        list_add_tail(&qe->list, &e->queue_list);
        atomic_inc(&e->refcnt);
        spin_unlock(&e->lock);
out:
        return err;
}
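/* Summary of the bookkeeping above: each sched_class keeps a queue_list
 * of sched_queue_entry nodes plus a refcnt. Binding appends an entry and
 * bumps the refcnt; unbinding removes it; and once the refcnt drops to
 * zero the class reverts to SCHED_STATE_UNUSED, making it available
 * again to t4_sched_class_lookup(pi, NULL) below.
 */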
/* Unbind all entities of the given type from scheduling class @e */
static void t4_sched_class_unbind_all(struct port_info *pi,
                                      struct sched_class *e,
                                      enum sched_bind_type type)
{
        if (!e)
                return;

        switch (type) {
        case SCHED_QUEUE: {
                struct sched_queue_entry *qe, *tmp;

                /* Use the _safe iterator: each unbind deletes and frees
                 * the current entry.
                 */
                list_for_each_entry_safe(qe, tmp, &e->queue_list, list)
                        t4_sched_queue_unbind(pi, &qe->param);
                break;
        }
        default:
                break;
        }
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
                                         enum sched_bind_type type, bool bind)
{
        int err = 0;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                if (bind)
                        err = t4_sched_queue_bind(pi, qe);
                else
                        err = t4_sched_queue_unbind(pi, qe);
                break;
        }
        default:
                err = -ENOTSUPP;
                break;
        }

        return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Binds an entity (queue) to a scheduling class. If the entity
 * is already bound to another class, it is unbound from that class
 * first and then bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
                           enum sched_bind_type type)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct sched_table *s;
        int err = 0;
        u8 class_id;

        if (!can_sched(dev))
                return -ENOTSUPP;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                class_id = qe->class;
                break;
        }
        default:
                return -ENOTSUPP;
        }

        if (!valid_class_id(dev, class_id))
                return -EINVAL;

        if (class_id == SCHED_CLS_NONE)
                return -ENOTSUPP;

        s = pi->sched_tbl;
        write_lock(&s->rw_lock);
        err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
        write_unlock(&s->rw_lock);

        return err;
}
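/* Illustrative usage sketch (not part of the driver): binding Tx queue 0
 * of a port to scheduling class 1 via the exported API, then unbinding
 * it again. The netdev pointer, queue number, and class number are
 * hypothetical.
 *
 *	struct ch_sched_queue qe = {
 *		.queue = 0,
 *		.class = 1,
 *	};
 *	int ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 *	...
 *	ret = cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
 */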
/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Unbinds an entity (queue) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
                             enum sched_bind_type type)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct sched_table *s;
        int err = 0;
        u8 class_id;

        if (!can_sched(dev))
                return -ENOTSUPP;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                class_id = qe->class;
                break;
        }
        default:
                return -ENOTSUPP;
        }

        if (!valid_class_id(dev, class_id))
                return -EINVAL;

        s = pi->sched_tbl;
        write_lock(&s->rw_lock);
        err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
        write_unlock(&s->rw_lock);

        return err;
}

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
                                                 const struct ch_sched_params *p)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e, *end;
        struct sched_class *found = NULL;

        if (!p) {
                /* Get any available unused class */
                end = &s->tab[s->sched_size];
                for (e = &s->tab[0]; e != end; ++e) {
                        if (e->state == SCHED_STATE_UNUSED) {
                                found = e;
                                break;
                        }
                }
        } else {
                /* Look for a class with matching scheduling parameters */
                struct ch_sched_params info;
                struct ch_sched_params tp;

                memcpy(&tp, p, sizeof(tp));
                /* Don't try to match class parameter */
                tp.u.params.class = SCHED_CLS_NONE;

                end = &s->tab[s->sched_size];
                for (e = &s->tab[0]; e != end; ++e) {
                        if (e->state == SCHED_STATE_UNUSED)
                                continue;

                        memcpy(&info, &e->info, sizeof(info));
                        /* Don't try to match class parameter */
                        info.u.params.class = SCHED_CLS_NONE;

                        if (info.type == tp.type &&
                            !memcmp(&info.u.params, &tp.u.params,
                                    sizeof(info.u.params))) {
                                found = e;
                                break;
                        }
                }
        }

        return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
                                                struct ch_sched_params *p)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        u8 class_id;
        int err;

        if (!p)
                return NULL;

        class_id = p->u.params.class;

        /* Only accept search for existing class with matching params
         * or allocation of new class with specified params
         */
        if (class_id != SCHED_CLS_NONE)
                return NULL;

        write_lock(&s->rw_lock);
        /* See if there's an existing class with the same
         * requested sched params
         */
        e = t4_sched_class_lookup(pi, p);
        if (!e) {
                struct ch_sched_params np;

                /* Fetch any available unused class */
                e = t4_sched_class_lookup(pi, NULL);
                if (!e)
                        goto out;

                memcpy(&np, p, sizeof(np));
                np.u.params.class = e->idx;

                spin_lock(&e->lock);
                /* New class */
                err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
                if (err) {
                        spin_unlock(&e->lock);
                        e = NULL;
                        goto out;
                }
                memcpy(&e->info, &np, sizeof(e->info));
                atomic_set(&e->refcnt, 0);
                e->state = SCHED_STATE_ACTIVE;
                spin_unlock(&e->lock);
        }

out:
        write_unlock(&s->rw_lock);
        return e;
}
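/* Illustrative sketch (not part of the driver): because
 * t4_sched_class_alloc() first looks for an ACTIVE class with identical
 * parameters, two requests with the same settings share one class.
 * The parameter values are hypothetical.
 *
 *	struct ch_sched_params p = {
 *		.type             = SCHED_CLASS_TYPE_PACKET,
 *		.u.params.class   = SCHED_CLS_NONE,
 *		.u.params.maxrate = 100000,
 *	};
 *	struct sched_class *a = cxgb4_sched_class_alloc(dev, &p);
 *	struct sched_class *b = cxgb4_sched_class_alloc(dev, &p);
 *
 * Here a == b: the second call returns the matching existing class
 * rather than consuming another table slot.
 */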
/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns a pointer to the scheduling class created. If a scheduling
 * class with parameters matching @p already exists, that class is
 * returned instead.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
                                            struct ch_sched_params *p)
{
        struct port_info *pi = netdev2pinfo(dev);
        u8 class_id;

        if (!p || !can_sched(dev))
                return NULL;

        class_id = p->u.params.class;
        if (!valid_class_id(dev, class_id))
                return NULL;

        return t4_sched_class_alloc(pi, p);
}

/* Free a scheduling class by unbinding everything bound to it; the
 * class reverts to SCHED_STATE_UNUSED once its refcount drops to zero.
 */
static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
{
        t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
}

/* Allocate and initialize a scheduler table with @sched_size classes */
struct sched_table *t4_init_sched(unsigned int sched_size)
{
        struct sched_table *s;
        unsigned int i;

        s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class),
                     GFP_KERNEL);
        if (!s)
                return NULL;

        s->sched_size = sched_size;
        rwlock_init(&s->rw_lock);

        for (i = 0; i < s->sched_size; i++) {
                s->tab[i].idx = i;
                s->tab[i].state = SCHED_STATE_UNUSED;
                INIT_LIST_HEAD(&s->tab[i].queue_list);
                spin_lock_init(&s->tab[i].lock);
                atomic_set(&s->tab[i].refcnt, 0);
        }

        return s;
}

/* Free the per-port scheduler tables on device removal */
void t4_cleanup_sched(struct adapter *adap)
{
        struct sched_table *s;
        unsigned int j, i;

        for_each_port(adap, j) {
                struct port_info *pi = netdev2pinfo(adap->port[j]);

                s = pi->sched_tbl;
                if (!s)
                        continue;

                for (i = 0; i < s->sched_size; i++) {
                        struct sched_class *e;

                        write_lock(&s->rw_lock);
                        e = &s->tab[i];
                        if (e->state == SCHED_STATE_ACTIVE)
                                t4_sched_class_free(pi, e);
                        write_unlock(&s->rw_lock);
                }
                kvfree(s);
        }
}
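/* Illustrative lifecycle sketch (not part of this file): the pairing of
 * t4_init_sched() and t4_cleanup_sched() expected of the main driver.
 * The size argument and warning message shown are hypothetical.
 *
 *	pi->sched_tbl = t4_init_sched(nclasses);
 *	if (!pi->sched_tbl)
 *		dev_warn(adap->pdev_dev,
 *			 "could not allocate TX scheduler, continuing\n");
 *
 * and at device removal time:
 *
 *	t4_cleanup_sched(adap);
 */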