// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"

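/* Validate the requested mqprio offload: only full TC hardware offload in
 * channel mode with a bandwidth rate shaper is supported, the number of
 * traffic classes must fit within the hardware scheduler classes, queue
 * ranges must not overlap across classes, the queue offsets/counts must fit
 * the available EOTIDs, and the aggregate min/max rates must not exceed the
 * current link speed.
 */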
static int cxgb4_mqprio_validate(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	u64 min_rate = 0, max_rate = 0, max_link_rate;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 speed, qcount = 0, qoffset = 0;
	u32 start_a, start_b, end_a, end_b;
	int ret;
	u8 i, j;

	if (!mqprio->qopt.num_tc)
		return 0;

	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
		netdev_err(dev, "Only full TC hardware offload is supported\n");
		return -EINVAL;
	} else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
		netdev_err(dev, "Only channel mode offload is supported\n");
		return -EINVAL;
	} else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
		netdev_err(dev, "Only bandwidth rate shaper supported\n");
		return -EINVAL;
	} else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
		netdev_err(dev,
			   "Only %u traffic classes supported by hardware\n",
			   adap->params.nsched_cls);
		return -ERANGE;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
		qcount += mqprio->qopt.count[i];

		start_a = mqprio->qopt.offset[i];
		end_a = start_a + mqprio->qopt.count[i] - 1;
		for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
			start_b = mqprio->qopt.offset[j];
			end_b = start_b + mqprio->qopt.count[j] - 1;

			/* If queue count is 0, then the traffic
			 * belonging to this class will not use
			 * ETHOFLD queues. So, no need to validate
			 * further.
			 */
			if (!mqprio->qopt.count[i])
				break;

			if (!mqprio->qopt.count[j])
				continue;

			if (max_t(u32, start_a, start_b) <=
			    min_t(u32, end_a, end_b)) {
				netdev_err(dev,
					   "Queues can't overlap across tc\n");
				return -EINVAL;
			}
		}

		/* Convert from bytes per second to bits per second */
		min_rate += (mqprio->min_rate[i] * 8);
		max_rate += (mqprio->max_rate[i] * 8);
	}

	if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
		return -ENOMEM;

	if (min_rate > max_link_rate || max_rate > max_link_rate) {
		netdev_err(dev,
			   "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
			   min_rate, max_rate, max_link_rate);
		return -EINVAL;
	}

	return 0;
}

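/* Initialize a software ETHOFLD Tx queue: zero it, allocate its descriptor
 * ring, bind it to the given EOTID and hardware queue, and set up its
 * restart tasklet.
 */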
static int cxgb4_init_eosw_txq(struct net_device *dev,
			       struct sge_eosw_txq *eosw_txq,
			       u32 eotid, u32 hwqid)
{
	struct adapter *adap = netdev2adap(dev);
	struct tx_sw_desc *ring;

	memset(eosw_txq, 0, sizeof(*eosw_txq));

	ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
		       sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	eosw_txq->desc = ring;
	eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
	spin_lock_init(&eosw_txq->lock);
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
	eosw_txq->eotid = eotid;
	eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->hwqid = hwqid;
	eosw_txq->netdev = dev;
	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
		     (unsigned long)eosw_txq);
	return 0;
}

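/* Free any descriptors still held by a software ETHOFLD Tx queue and reset
 * its indices, credits, and state back to CLOSED.
 */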
static void cxgb4_clean_eosw_txq(struct net_device *dev,
				 struct sge_eosw_txq *eosw_txq)
{
	struct adapter *adap = netdev2adap(dev);

	cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
	eosw_txq->pidx = 0;
	eosw_txq->last_pidx = 0;
	eosw_txq->cidx = 0;
	eosw_txq->last_cidx = 0;
	eosw_txq->flowc_idx = 0;
	eosw_txq->inuse = 0;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->ncompl = 0;
	eosw_txq->last_compl = 0;
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}

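/* Clean up a software ETHOFLD Tx queue, free its descriptor ring, and kill
 * its restart tasklet.
 */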
static void cxgb4_free_eosw_txq(struct net_device *dev,
				struct sge_eosw_txq *eosw_txq)
{
	spin_lock_bh(&eosw_txq->lock);
	cxgb4_clean_eosw_txq(dev, eosw_txq);
	kfree(eosw_txq->desc);
	spin_unlock_bh(&eosw_txq->lock);
	tasklet_kill(&eosw_txq->qresume_tsk);
}

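/* Allocate this port's ETHOFLD hardware queues: take a reference on the
 * shared per-adapter queue arrays, then set up one Rx queue (used to receive
 * ETHOFLD Tx completions) and one hardware Tx queue per queue set, along
 * with their MSI-X vectors, IRQ affinity, and Rx enablement.
 */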
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	int ret, msix = 0;
	u32 i;

	/* Allocate ETHOFLD hardware queue structures if not done already */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_ofld_rxq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_rxq)
			return -ENOMEM;

		adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_eohw_txq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_txq) {
			kfree(adap->sge.eohw_rxq);
			return -ENOMEM;
		}

		refcount_set(&adap->tc_mqprio->refcnt, 1);
	} else {
		refcount_inc(&adap->tc_mqprio->refcnt);
	}

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Allocate Rxqs for receiving ETHOFLD Tx completions */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			eorxq->msix = &adap->msix_info[msix];
			snprintf(eorxq->msix->desc,
				 sizeof(eorxq->msix->desc),
				 "%s-eorxq%d", dev->name, i);
		}

		init_rspq(adap, &eorxq->rspq,
			  CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

		eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
				       dev, msix, &eorxq->fl,
				       cxgb4_ethofld_rx_handler,
				       NULL, 0);
		if (ret)
			goto out_free_queues;

		/* Allocate ETHOFLD hardware Txqs */
		eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
		ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
					       eorxq->rspq.cntxt_id);
		if (ret)
			goto out_free_queues;

		/* Allocate IRQs, set IRQ affinity, and start Rx */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
					  eorxq->msix->desc, &eorxq->rspq);
			if (ret)
				goto out_free_msix;

			cxgb4_set_msix_aff(adap, eorxq->msix->vec,
					   &eorxq->msix->aff_mask, i);
		}

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_enable_rx(adap, &eorxq->rspq);
	}

	return 0;

out_free_msix:
	while (i-- > 0) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}
	}

out_free_queues:
	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		if (eorxq->rspq.desc)
			free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		if (eorxq->msix)
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
	return ret;
}

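/* Release this port's ETHOFLD hardware Rx/Tx queues, their IRQs and MSI-X
 * affinity, and drop the reference on the shared per-adapter queue arrays,
 * freeing them once the last port is done with them.
 */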
static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	u32 i;

	/* Return if no ETHOFLD structures have been allocated yet */
	if (!refcount_read(&adap->tc_mqprio->refcnt))
		return;

	/* Return if no hardware queues have been allocated */
	if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
		return;

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Device removal path will already disable NAPI
		 * before unregistering netdevice. So, only disable
		 * NAPI if we're not in device removal path
		 */
		if (!(adap->flags & CXGB4_SHUTTING_DOWN))
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Free up ETHOFLD structures if there are no users */
	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
}

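/* Allocate one hardware scheduler class per traffic class, configured as a
 * flow-mode class rate limit, with the requested min/max rates converted
 * from bytes per second to Kbps.
 */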
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
		.u.params.mode = SCHED_CLASS_MODE_FLOW,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sched_class *e;
	int ret;
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	p.u.params.channel = pi->tx_chan;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		/* Convert from bytes per second to Kbps */
		p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
		p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

		e = cxgb4_sched_class_alloc(dev, &p);
		if (!e) {
			ret = -ENOMEM;
			goto out_err;
		}

		tc_port_mqprio->tc_hwtc_map[i] = e->idx;
	}

	return 0;

out_err:
	while (i--)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

	return ret;
}

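/* Free the hardware traffic classes allocated for this port's current
 * mqprio configuration.
 */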
static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}

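/* Bind a software ETHOFLD Tx queue's EOTID to a hardware traffic class and
 * wait, with a timeout, for the completion that signals the bind has taken
 * effect.
 */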
static int cxgb4_mqprio_class_bind(struct net_device *dev,
				   struct sge_eosw_txq *eosw_txq,
				   u8 tc)
{
	struct ch_sched_flowc fe;
	int ret;

	init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&eosw_txq->completion,
					  CXGB4_FLOWC_WAIT_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}

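/* Unbind a software ETHOFLD Tx queue's EOTID from its hardware traffic
 * class. The completion wait is skipped when the adapter is shutting down,
 * since no reply will arrive.
 */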
static void cxgb4_mqprio_class_unbind(struct net_device *dev,
				      struct sge_eosw_txq *eosw_txq,
				      u8 tc)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_sched_flowc fe;

	/* If we're shutting down, interrupts are disabled and no completions
	 * come back. So, skip waiting for completions in this scenario.
	 */
	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;
	cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		wait_for_completion_timeout(&eosw_txq->completion,
					    CXGB4_FLOWC_WAIT_TIMEOUT);
}

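/* Enable the mqprio offload: allocate the ETHOFLD hardware queues, then for
 * every offloaded queue grab a free EOTID, initialize its software Tx queue,
 * and bind it to the traffic class's hardware scheduler class. Finally,
 * publish the tc-to-queue mapping to the network stack; classes with a zero
 * queue count keep using the default NIC queues.
 */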
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
				       struct tc_mqprio_qopt_offload *mqprio)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	u32 qoffset, qcount, tot_qcount, qid, hwqid;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	int eotid, ret;
	u16 i, j;
	u8 hwtc;

	ret = cxgb4_mqprio_alloc_hw_resources(dev);
	if (ret)
		return -ENOMEM;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eotid = cxgb4_get_free_eotid(&adap->tids);
			if (eotid < 0) {
				ret = -ENOMEM;
				goto out_free_eotids;
			}

			qid = qoffset + j;
			hwqid = pi->first_qset + (eotid % pi->nqsets);
			eosw_txq = &tc_port_mqprio->eosw_txq[qid];
			ret = cxgb4_init_eosw_txq(dev, eosw_txq,
						  eotid, hwqid);
			if (ret)
				goto out_free_eotids;

			cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
			if (ret)
				goto out_free_eotids;
		}
	}

	memcpy(&tc_port_mqprio->mqprio, mqprio,
	       sizeof(struct tc_mqprio_qopt_offload));

	/* Inform the stack about the configured tc params.
	 *
	 * Set the correct queue map. If no queue count has been
	 * specified, then send the traffic through the default NIC
	 * queues instead of the ETHOFLD queues.
	 */
	ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
	if (ret)
		goto out_free_eotids;

	tot_qcount = pi->nqsets;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qcount = mqprio->qopt.count[i];
		if (qcount) {
			qoffset = mqprio->qopt.offset[i] + pi->nqsets;
		} else {
			qcount = pi->nqsets;
			qoffset = 0;
		}

		ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
		if (ret)
			goto out_reset_tc;

		tot_qcount += mqprio->qopt.count[i];
	}

	ret = netif_set_real_num_tx_queues(dev, tot_qcount);
	if (ret)
		goto out_reset_tc;

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
	return 0;

out_reset_tc:
	netdev_reset_tc(dev);
	i = mqprio->qopt.num_tc;

out_free_eotids:
	while (i-- > 0) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);
	return ret;
}

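/* Tear down an active mqprio offload: reset the stack's tc mapping, unbind
 * and free all EOTIDs and software Tx queues, release the hardware queues,
 * and free the hardware traffic classes.
 */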
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qoffset, qcount;
	u16 i, j;
	u8 hwtc;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
		return;

	netdev_reset_tc(dev);
	netif_set_real_num_tx_queues(dev, pi->nqsets);

	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
		qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
		qcount = tc_port_mqprio->mqprio.qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);

	/* Free up the traffic classes */
	cxgb4_mqprio_free_tc(dev);

	memset(&tc_port_mqprio->mqprio, 0,
	       sizeof(struct tc_mqprio_qopt_offload));

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}

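/* Configure or clear the mqprio offload for a port. The interface is
 * brought down first, if it was running, so the existing EOTIDs can be
 * freed; any previous configuration is removed, and the new one (if any)
 * is applied before the interface is brought back up.
 */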
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	/* To configure the tc params, the currently allocated EOTIDs must
	 * be freed up. However, they can't be freed up if there's traffic
	 * running on the interface. So, ensure the interface is down
	 * before configuring the tc params.
	 */
	if (netif_running(dev)) {
		cxgb_close(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If the request is to clear the configuration, just return since
	 * the resources have already been freed up by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret) {
		cxgb4_mqprio_free_tc(dev);
		goto out;
	}

out:
	if (needs_bring_up)
		cxgb_open(dev);

	return ret;
}

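/* Allocate the adapter-wide mqprio state: one cxgb4_tc_port_mqprio entry per
 * port, each with an array of software ETHOFLD Tx queues sized by the number
 * of available EOTIDs.
 */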
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
	struct cxgb4_tc_mqprio *tc_mqprio;
	struct sge_eosw_txq *eosw_txq;
	int ret = 0;
	u8 i;

	tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
	if (!tc_mqprio)
		return -ENOMEM;

	tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
				 GFP_KERNEL);
	if (!tc_port_mqprio) {
		ret = -ENOMEM;
		goto out_free_mqprio;
	}

	tc_mqprio->port_mqprio = tc_port_mqprio;
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
				   GFP_KERNEL);
		if (!eosw_txq) {
			ret = -ENOMEM;
			goto out_free_ports;
		}
		port_mqprio->eosw_txq = eosw_txq;
	}

	adap->tc_mqprio = tc_mqprio;
	refcount_set(&adap->tc_mqprio->refcnt, 0);
	return 0;

out_free_ports:
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		kfree(port_mqprio->eosw_txq);
	}
	kfree(tc_port_mqprio);

out_free_mqprio:
	kfree(tc_mqprio);
	return ret;
}

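/* Disable any active mqprio offload on each port and free the adapter-wide
 * mqprio state.
 */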
void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *port_mqprio;
	u8 i;

	if (adap->tc_mqprio) {
		if (adap->tc_mqprio->port_mqprio) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_mqprio_disable_offload(dev);
				port_mqprio = &adap->tc_mqprio->port_mqprio[i];
				kfree(port_mqprio->eosw_txq);
			}
			kfree(adap->tc_mqprio->port_mqprio);
		}
		kfree(adap->tc_mqprio);
	}
}