/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

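/* Pass a scheduling class operation down to the firmware. Both the
 * ADD and DEL operations map onto t4_sched_params(), which programs
 * the parameters in @p against hardware class e->idx.
 */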
static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
	case SCHED_FW_OP_DEL:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize,
				      p->u.params.burstsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

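/* Issue the firmware command that actually binds or unbinds an entity.
 * For a queue (SCHED_QUEUE), the scheduling class is written into the
 * egress queue context via a FW_PARAMS command; for an ETHOFLD flow
 * (SCHED_FLOWC), a FLOWC work request carrying the class is sent.
 * Unbinding uses the same path with FW_SCHED_CLS_NONE as the class.
 */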
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;

		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
				    &fw_param, &fw_class);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		fe = (struct sched_flowc_entry *)arg;

		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

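/* Scan every in-use class on this port for an entry of the given bind
 * type whose identifier matches @val: the queue context id for
 * SCHED_QUEUE, or the ETHOFLD tid for SCHED_FLOWC. Returns the bound
 * entry, or NULL if nothing matches.
 */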
static void *t4_sched_entry_lookup(struct port_info *pi,
				   enum sched_bind_type type,
				   const u32 val)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	void *found = NULL;

	/* Look for an entry with matching @val */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		if (e->state == SCHED_STATE_UNUSED ||
		    e->bind_type != type)
			continue;

		switch (type) {
		case SCHED_QUEUE: {
			struct sched_queue_entry *qe;

			list_for_each_entry(qe, &e->entry_list, list) {
				if (qe->cntxt_id == val) {
					found = qe;
					break;
				}
			}
			break;
		}
		case SCHED_FLOWC: {
			struct sched_flowc_entry *fe;

			list_for_each_entry(fe, &e->entry_list, list) {
				if (fe->param.tid == val) {
					found = fe;
					break;
				}
			}
			break;
		}
		default:
			return NULL;
		}

		if (found)
			break;
	}

	return found;
}

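/**
 * cxgb4_sched_queue_lookup - find the class a Tx queue is bound to
 * @dev: net_device pointer
 * @p: scheduling queue entry identifying the Tx queue
 *
 * Returns the scheduling class the queue is currently bound to, or
 * NULL if the queue index is out of range or the queue is unbound.
 */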
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
					     struct ch_sched_queue *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return NULL;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
}

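/* Detach a Tx queue from whatever class it is currently bound to (a
 * no-op if it is unbound), dropping the class reference and freeing
 * the class once the last user is gone.
 */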
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

	/* Find the existing entry that the queue is bound to */
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (qe) {
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[qe->param.class];
		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

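/* Bind a Tx queue to the class requested in @p. Any existing binding
 * is torn down first, then the new binding is pushed to firmware and
 * tracked on the class's entry list with a reference taken.
 */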
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind queue to specified class */
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err)
		goto out_err;

	list_add_tail(&qe->list, &e->entry_list);
	e->bind_type = SCHED_QUEUE;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(qe);
	return err;
}

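/* FLOWC counterpart of t4_sched_queue_unbind(): detach an ETHOFLD
 * flow, identified by its tid, from its scheduling class.
 */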
static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (fe) {
		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[fe->param.class];
		list_del(&fe->list);
		kvfree(fe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

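/* FLOWC counterpart of t4_sched_queue_bind(): attach an ETHOFLD flow
 * to the class requested in @p, unbinding it from any previous class
 * first.
 */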
static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Unbind flowc from any existing class */
	err = t4_sched_flowc_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind flowc to specified class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (err)
		goto out_err;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(fe);
	return err;
}

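/* Unbind every entity still attached to class @e before the class is
 * torn down.
 */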
static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		list_for_each_entry(qe, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		list_for_each_entry(fe, &e->entry_list, list)
			t4_sched_flowc_unbind(pi, &fe->param);
		break;
	}
	default:
		break;
	}
}

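/* Dispatch a bind or unbind request to the handler for the entity
 * type.
 */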
static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		if (bind)
			err = t4_sched_flowc_bind(pi, fe);
		else
			err = t4_sched_flowc_unbind(pi, fe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Binds an entity (queue) to a scheduling class.  If the entity
 * is bound to another class, it will be unbound from the other class
 * and bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}

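/* Illustrative sketch (assumption, not code from this driver): a
 * caller that has obtained a class 'e' from cxgb4_sched_class_alloc()
 * might bind the port's first Tx queue to it like so:
 *
 *	struct ch_sched_queue qe = { .queue = 0, .class = e->idx };
 *
 *	err = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 *
 * and later detach it with cxgb4_sched_class_unbind(dev, &qe,
 * SCHED_QUEUE).
 */
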
/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Unbinds an entity (queue) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *found = NULL;
	struct sched_class *e, *end;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

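/* Allocate a class for the parameters in @p, which must request class
 * SCHED_CLS_NONE. Flow-mode classes with identical parameters are
 * shared; otherwise an unused class is claimed and programmed into
 * firmware.
 */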
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_class *e = NULL;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	/* See if there's an existing class with same requested sched
	 * params. Classes can only be shared among FLOWC types. For
	 * other types, always request a new class.
	 */
	if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
		e = t4_sched_class_lookup(pi, p);

	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			return NULL;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err)
			return NULL;
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
	}

	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns pointer to the scheduling class created. @p must not be
 * NULL and must request class SCHED_CLS_NONE. If an active class with
 * matching parameters already exists (flow mode only), that class is
 * returned; otherwise an unused class is programmed with @p.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}

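/* Illustrative sketch (assumption, not code from this driver): a
 * caller requesting a new 100 Mbps class-level rate limiter might
 * fill in the parameters like so (pi here is netdev2pinfo(dev)):
 *
 *	struct ch_sched_params p = { 0 };
 *
 *	p.type = SCHED_CLASS_TYPE_PACKET;
 *	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
 *	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
 *	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
 *	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
 *	p.u.params.channel = pi->tx_chan;
 *	p.u.params.class = SCHED_CLS_NONE;
 *	p.u.params.maxrate = 100000;	(maxrate is in Kbps)
 *	e = cxgb4_sched_class_alloc(dev, &p);
 */
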
/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct ch_sched_params p;
	struct sched_class *e;
	u32 speed;
	int ret;

	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
		/* Port based rate limiting needs explicit reset back
		 * to max rate. But, we'll do explicit reset for all
		 * types, instead of just port based type, to be on
		 * the safer side.
		 */
		memcpy(&p, &e->info, sizeof(p));
		/* Always reset mode to 0. Otherwise, FLOWC mode will
		 * still be enabled even after resetting the traffic
		 * class.
		 */
		p.u.params.mode = 0;
		p.u.params.minrate = 0;
		p.u.params.pktsize = 0;

		ret = t4_get_link_params(pi, NULL, &speed, NULL);
		if (!ret)
			p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
		else
			p.u.params.maxrate = SCHED_MAX_RATE_KBPS;

		t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);

		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}

static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}

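/* Allocate and initialize a scheduling table with @sched_size classes,
 * all starting out unused.
 */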
struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].entry_list);
		atomic_set(&s->tab[i].refcnt, 0);
	}
	return s;
}

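/* Release every port's scheduling table, unbinding and freeing any
 * classes still active.
 */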
void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(adap->port[j], e);
		}
		kvfree(s);
	}
}