// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"

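/* Validate the requested mqprio offload: only full TC hardware offload in
 * channel mode with a bandwidth rate shaper is supported, the number of
 * traffic classes must fit the available scheduler classes, per-tc queue
 * ranges must not overlap, and the aggregate min/max rates must not exceed
 * the current link speed.
 */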
static int cxgb4_mqprio_validate(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	u64 min_rate = 0, max_rate = 0, max_link_rate;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 speed, qcount = 0, qoffset = 0;
	u32 start_a, start_b, end_a, end_b;
	int ret;
	u8 i, j;

	if (!mqprio->qopt.num_tc)
		return 0;

	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
		netdev_err(dev, "Only full TC hardware offload is supported\n");
		return -EINVAL;
	} else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
		netdev_err(dev, "Only channel mode offload is supported\n");
		return -EINVAL;
	} else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
		netdev_err(dev, "Only bandwidth rate shaper supported\n");
		return -EINVAL;
	} else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
		netdev_err(dev,
			   "Only %u traffic classes supported by hardware\n",
			   adap->params.nsched_cls);
		return -ERANGE;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
		qcount += mqprio->qopt.count[i];

		start_a = mqprio->qopt.offset[i];
		end_a = start_a + mqprio->qopt.count[i] - 1;
		for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
			start_b = mqprio->qopt.offset[j];
			end_b = start_b + mqprio->qopt.count[j] - 1;

			/* If queue count is 0, then the traffic
			 * belonging to this class will not use
			 * ETHOFLD queues. So, no need to validate
			 * further.
			 */
			if (!mqprio->qopt.count[i])
				break;

			if (!mqprio->qopt.count[j])
				continue;

			if (max_t(u32, start_a, start_b) <=
			    min_t(u32, end_a, end_b)) {
				netdev_err(dev,
					   "Queues can't overlap across tc\n");
				return -EINVAL;
			}
		}

		/* Convert bytes per second to bits per second */
		min_rate += (mqprio->min_rate[i] * 8);
		max_rate += (mqprio->max_rate[i] * 8);
	}

	if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
		return -ENOMEM;

	if (min_rate > max_link_rate || max_rate > max_link_rate) {
		netdev_err(dev,
			   "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
			   min_rate, max_rate, max_link_rate);
		return -EINVAL;
	}

	return 0;
}

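/* Initialize a software ETHOFLD Tx queue: allocate its descriptor ring,
 * record the EOTID and hardware queue it maps to, and set up the restart
 * tasklet used to resume transmission once credits are replenished.
 */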
static int cxgb4_init_eosw_txq(struct net_device *dev,
			       struct sge_eosw_txq *eosw_txq,
			       u32 eotid, u32 hwqid)
{
	struct adapter *adap = netdev2adap(dev);
	struct tx_sw_desc *ring;

	memset(eosw_txq, 0, sizeof(*eosw_txq));

	ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
		       sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	eosw_txq->desc = ring;
	eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
	spin_lock_init(&eosw_txq->lock);
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
	eosw_txq->eotid = eotid;
	eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->hwqid = hwqid;
	eosw_txq->netdev = dev;
	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
		     (unsigned long)eosw_txq);
	return 0;
}

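/* Free any pending descriptors and reset the software Tx queue state back
 * to CLOSED with full work request credits.
 */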
static void cxgb4_clean_eosw_txq(struct net_device *dev,
				 struct sge_eosw_txq *eosw_txq)
{
	struct adapter *adap = netdev2adap(dev);

	cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
	eosw_txq->pidx = 0;
	eosw_txq->last_pidx = 0;
	eosw_txq->cidx = 0;
	eosw_txq->last_cidx = 0;
	eosw_txq->flowc_idx = 0;
	eosw_txq->inuse = 0;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->ncompl = 0;
	eosw_txq->last_compl = 0;
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}

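/* Clean up a software ETHOFLD Tx queue, release its descriptor ring, and
 * kill the restart tasklet.
 */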
static void cxgb4_free_eosw_txq(struct net_device *dev,
				struct sge_eosw_txq *eosw_txq)
{
	spin_lock_bh(&eosw_txq->lock);
	cxgb4_clean_eosw_txq(dev, eosw_txq);
	kfree(eosw_txq->desc);
	spin_unlock_bh(&eosw_txq->lock);
	tasklet_kill(&eosw_txq->qresume_tsk);
}

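/* Allocate this port's ETHOFLD hardware queues (Rx queues for Tx
 * completions and hardware Tx queues), request their MSI-X vectors, and
 * start Rx. The eohw_rxq/eohw_txq arrays are shared by all ports and
 * reference counted via tc_mqprio->refcnt.
 */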
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	int ret, msix = 0;
	u32 i;

	/* Allocate ETHOFLD hardware queue structures if not done already */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_ofld_rxq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_rxq)
			return -ENOMEM;

		adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_eohw_txq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_txq) {
			kfree(adap->sge.eohw_rxq);
			return -ENOMEM;
		}

		refcount_set(&adap->tc_mqprio->refcnt, 1);
	} else {
		refcount_inc(&adap->tc_mqprio->refcnt);
	}

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Allocate Rxqs for receiving ETHOFLD Tx completions */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			eorxq->msix = &adap->msix_info[msix];
			snprintf(eorxq->msix->desc,
				 sizeof(eorxq->msix->desc),
				 "%s-eorxq%d", dev->name, i);
		}

		init_rspq(adap, &eorxq->rspq,
			  CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

		eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
				       dev, msix, &eorxq->fl,
				       cxgb4_ethofld_rx_handler,
				       NULL, 0);
		if (ret)
			goto out_free_queues;

		/* Allocate ETHOFLD hardware Txqs */
		eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
		ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
					       eorxq->rspq.cntxt_id);
		if (ret)
			goto out_free_queues;

		/* Allocate IRQs, set IRQ affinity, and start Rx */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
					  eorxq->msix->desc, &eorxq->rspq);
			if (ret)
				goto out_free_msix;

			cxgb4_set_msix_aff(adap, eorxq->msix->vec,
					   &eorxq->msix->aff_mask, i);
		}

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_enable_rx(adap, &eorxq->rspq);
	}

	return 0;

out_free_msix:
	while (i-- > 0) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}
	}

out_free_queues:
	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		if (eorxq->rspq.desc)
			free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		if (eorxq->msix)
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
	return ret;
}

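/* Quiesce and free this port's ETHOFLD hardware queues allocated by
 * cxgb4_mqprio_alloc_hw_resources(), dropping the shared structures when
 * the last user goes away.
 */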
static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	u32 i;

	/* Return if no ETHOFLD structures have been allocated yet */
	if (!refcount_read(&adap->tc_mqprio->refcnt))
		return;

	/* Return if no hardware queues have been allocated */
	if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
		return;

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* The device removal path will have already disabled
		 * NAPI before unregistering the netdevice. So, only
		 * disable NAPI here if we're not in the device removal
		 * path.
		 */
		if (!(adap->flags & CXGB4_SHUTTING_DOWN))
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Free up ETHOFLD structures if there are no users */
	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
}

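/* Allocate one flow-mode, rate-limited scheduler class per traffic class
 * and record the tc to hardware class mapping for this port.
 */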
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
		.u.params.mode = SCHED_CLASS_MODE_FLOW,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sched_class *e;
	int ret;
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	p.u.params.channel = pi->tx_chan;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		/* Convert from bytes per second to Kbps */
		p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
		p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

		e = cxgb4_sched_class_alloc(dev, &p);
		if (!e) {
			ret = -ENOMEM;
			goto out_err;
		}

		tc_port_mqprio->tc_hwtc_map[i] = e->idx;
	}

	return 0;

out_err:
	while (i--)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

	return ret;
}

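/* Release the scheduler classes allocated for this port's traffic classes. */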
static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}

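/* Bind an ETHOFLD Tx queue's EOTID to a scheduler class and wait for the
 * FLOWC completion from hardware.
 */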
static int cxgb4_mqprio_class_bind(struct net_device *dev,
				   struct sge_eosw_txq *eosw_txq,
				   u8 tc)
{
	struct ch_sched_flowc fe;
	int ret;

	init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&eosw_txq->completion,
					  CXGB4_FLOWC_WAIT_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}

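/* Unbind an ETHOFLD Tx queue's EOTID from its scheduler class. The FLOWC
 * completion is only waited for when the adapter is not shutting down.
 */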
static void cxgb4_mqprio_class_unbind(struct net_device *dev,
				      struct sge_eosw_txq *eosw_txq,
				      u8 tc)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_sched_flowc fe;

	/* If we're shutting down, interrupts are disabled and no completions
	 * come back. So, skip waiting for completions in this scenario.
	 */
	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;
	cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		wait_for_completion_timeout(&eosw_txq->completion,
					    CXGB4_FLOWC_WAIT_TIMEOUT);
}

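/* Set up the mqprio offload: allocate hardware queues, assign an EOTID and
 * software Tx queue to every offloaded queue, bind them to their scheduler
 * classes, and publish the tc-to-queue mapping to the stack.
 */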
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
				       struct tc_mqprio_qopt_offload *mqprio)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	u32 qoffset, qcount, tot_qcount, qid, hwqid;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	int eotid, ret;
	u16 i, j;
	u8 hwtc;

	ret = cxgb4_mqprio_alloc_hw_resources(dev);
	if (ret)
		return -ENOMEM;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eotid = cxgb4_get_free_eotid(&adap->tids);
			if (eotid < 0) {
				ret = -ENOMEM;
				goto out_free_eotids;
			}

			qid = qoffset + j;
			hwqid = pi->first_qset + (eotid % pi->nqsets);
			eosw_txq = &tc_port_mqprio->eosw_txq[qid];
			ret = cxgb4_init_eosw_txq(dev, eosw_txq,
						  eotid, hwqid);
			if (ret)
				goto out_free_eotids;

			cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
			if (ret)
				goto out_free_eotids;
		}
	}

	memcpy(&tc_port_mqprio->mqprio, mqprio,
	       sizeof(struct tc_mqprio_qopt_offload));

	/* Inform the stack about the configured tc params.
	 *
	 * Set the correct queue map. If no queue count has been
	 * specified for a class, then send its traffic through the
	 * default NIC queues instead of the ETHOFLD queues.
	 */
	ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
	if (ret)
		goto out_free_eotids;

	tot_qcount = pi->nqsets;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qcount = mqprio->qopt.count[i];
		if (qcount) {
			qoffset = mqprio->qopt.offset[i] + pi->nqsets;
		} else {
			qcount = pi->nqsets;
			qoffset = 0;
		}

		ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
		if (ret)
			goto out_reset_tc;

		tot_qcount += mqprio->qopt.count[i];
	}

	ret = netif_set_real_num_tx_queues(dev, tot_qcount);
	if (ret)
		goto out_reset_tc;

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
	return 0;

out_reset_tc:
	netdev_reset_tc(dev);
	i = mqprio->qopt.num_tc;

out_free_eotids:
	while (i-- > 0) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);
	return ret;
}

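/* Tear down an active mqprio offload: reset the tc mapping, unbind and free
 * all EOTIDs and software Tx queues, and release the hardware queues and
 * scheduler classes.
 */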
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qoffset, qcount;
	u16 i, j;
	u8 hwtc;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
		return;

	netdev_reset_tc(dev);
	netif_set_real_num_tx_queues(dev, pi->nqsets);

	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
		qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
		qcount = tc_port_mqprio->mqprio.qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);

	/* Free up the traffic classes */
	cxgb4_mqprio_free_tc(dev);

	memset(&tc_port_mqprio->mqprio, 0,
	       sizeof(struct tc_mqprio_qopt_offload));

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}

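/* Entry point for mqprio offload configuration requests. Any existing
 * configuration is torn down first, so a request with zero traffic classes
 * acts as a clear.
 */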
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	/* To configure tc params, the currently allocated EOTIDs must
	 * be freed. However, they can't be freed if there's traffic
	 * running on the interface. So, ensure the interface is down
	 * before configuring tc params.
	 */
	if (netif_running(dev)) {
		cxgb_close(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If a clear was requested, just return since the resources
	 * have already been freed by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret) {
		cxgb4_mqprio_free_tc(dev);
		goto out;
	}

out:
	if (needs_bring_up)
		cxgb_open(dev);

	return ret;
}

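/* Disable the mqprio offload on every port that still has traffic classes
 * configured.
 */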
void cxgb4_mqprio_stop_offload(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct net_device *dev;
	u8 i;

	if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
		return;

	for_each_port(adap, i) {
		dev = adap->port[i];
		if (!dev)
			continue;

		tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
		if (!tc_port_mqprio->mqprio.qopt.num_tc)
			continue;

		cxgb4_mqprio_disable_offload(dev);
	}
}

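/* Allocate the per-adapter mqprio state: one cxgb4_tc_port_mqprio per port,
 * each with a software Tx queue slot per available EOTID.
 */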
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
	struct cxgb4_tc_mqprio *tc_mqprio;
	struct sge_eosw_txq *eosw_txq;
	int ret = 0;
	u8 i;

	tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
	if (!tc_mqprio)
		return -ENOMEM;

	tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
				 GFP_KERNEL);
	if (!tc_port_mqprio) {
		ret = -ENOMEM;
		goto out_free_mqprio;
	}

	tc_mqprio->port_mqprio = tc_port_mqprio;
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
				   GFP_KERNEL);
		if (!eosw_txq) {
			ret = -ENOMEM;
			goto out_free_ports;
		}
		port_mqprio->eosw_txq = eosw_txq;
	}

	adap->tc_mqprio = tc_mqprio;
	refcount_set(&adap->tc_mqprio->refcnt, 0);
	return 0;

out_free_ports:
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		kfree(port_mqprio->eosw_txq);
	}
	kfree(tc_port_mqprio);

out_free_mqprio:
	kfree(tc_mqprio);
	return ret;
}

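/* Disable any remaining offloads and free all per-port mqprio state. */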
void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *port_mqprio;
	u8 i;

	if (adap->tc_mqprio) {
		if (adap->tc_mqprio->port_mqprio) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_mqprio_disable_offload(dev);
				port_mqprio = &adap->tc_mqprio->port_mqprio[i];
				kfree(port_mqprio->eosw_txq);
			}
			kfree(adap->tc_mqprio->port_mqprio);
		}
		kfree(adap->tc_mqprio);
	}
}