// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include "otx2_common.h"

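/* Ensure every priority with PFC enabled has a Tx queue backing it;
 * otherwise warn the user to increase the Tx queue count.
 */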
static int otx2_check_pfc_config(struct otx2_nic *pfvf)
{
	u8 tx_queues = pfvf->hw.tx_queues, prio;
	u8 pfc_en = pfvf->pfc_en;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		if ((pfc_en & (1 << prio)) &&
		    prio > tx_queues - 1) {
			dev_warn(pfvf->dev,
				 "Increase number of tx queues from %d to %d to support PFC.\n",
				 tx_queues, prio + 1);
			return -EINVAL;
		}
	}

	return 0;
}

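/* Apply the Tx scheduler (TL) configuration for every priority that
 * has PFC enabled and a dedicated scheduler tree allocated.
 */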
int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, lvl, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* Either PFC bit is not set
		 * or tx scheduler is not allocated for the priority
		 */
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Configure the scheduler for the TLs */
		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			err = otx2_txschq_config(pfvf, lvl, prio, true);
			if (err) {
				dev_err(pfvf->dev,
					"%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
					__func__, lvl, prio);
				return err;
			}
		}
	}

	return 0;
}

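/* Allocate one Tx scheduler queue per level (up to the link config
 * level) for the given priority and record it in pfc_schq_list.
 */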
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int lvl, rc;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level, up to the configured link config
	 * level. The remaining levels can reuse the schedulers in
	 * hw.txschq_list.
	 */
	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
		req->schq[lvl] = 1;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		return rc;

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
		if (!rsp->schq[lvl])
			return -ENOSPC;

		pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
	}

	/* Set the Tx schedulers for the rest of the levels to the same
	 * values as hw.txschq_list, as those are common to all priorities.
	 */
	for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];

	pfvf->pfc_alloc_status[prio] = true;
	return 0;
}

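/* Allocate dedicated Tx scheduler trees for all priorities that have
 * PFC enabled but no schedulers allocated yet.
 */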
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
{
	u8 pfc_en = pfvf->pfc_en;
	u8 pfc_bit_set;
	int err, prio;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}

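/* Send a mailbox request to free the Tx scheduler queues and mark this
 * priority as no longer having a dedicated PFC scheduler tree.
 */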
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
	struct nix_txsch_free_req *free_req;

	mutex_lock(&pfvf->mbox.lock);
	/* free PFC TLx nodes */
	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	free_req->flags = TXSCHQ_FREE_ALL;
	otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	pfvf->pfc_alloc_status[prio] = false;
	return 0;
}

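/* Point the send queue used for this priority at the SMQ of its current
 * scheduler tree via an AQ write, pausing the Tx queue(s) while the
 * mapping is changed.
 */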
static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
{
	struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
	struct net_device *dev = pfvf->netdev;
	bool if_up = netif_running(dev);
	struct nix_aq_enq_req *sq_aq;

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_stop_all_queues(pfvf->netdev);
		else
			netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
	}

	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		cn10k_sq_aq->qidx = prio;
		cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		cn10k_sq_aq->sq.ena = 1;
		cn10k_sq_aq->sq_mask.ena = 1;
		cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
		cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	} else {
		sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		sq_aq->qidx = prio;
		sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		sq_aq->sq.ena = 1;
		sq_aq->sq_mask.ena = 1;
		sq_aq->sq_mask.smq = GENMASK(8, 0);
		sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	}

	otx2_sync_mbox_msg(&pfvf->mbox);

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_start_all_queues(pfvf->netdev);
		else
			netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
	}

	return 0;
}

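/* Reconcile the PFC Tx scheduler state with the current pfc_en bitmap:
 * tear down schedulers for priorities that were disabled, allocate new
 * ones for priorities that were enabled, refresh the SQ to SMQ mappings
 * and reapply the scheduler configuration.
 */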
int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
{
	bool if_up = netif_running(pfvf->netdev);
	u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
	struct mbox *mbox = &pfvf->mbox;
	int err, prio;

	mutex_lock(&mbox->lock);
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* Tx scheduler was created but the user now wants to disable it */
		if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
			mutex_unlock(&mbox->lock);
			if (if_up)
				netif_tx_stop_all_queues(pfvf->netdev);

			otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
			if (if_up)
				netif_tx_start_all_queues(pfvf->netdev);

			/* delete the schq */
			err = otx2_pfc_txschq_stop_one(pfvf, prio);
			if (err) {
				dev_err(pfvf->dev,
					"%s failed to stop PFC tx schedulers for priority: %d\n",
					__func__, prio);
				return err;
			}

			mutex_lock(&mbox->lock);
			goto update_sq_smq_map;
		}

		/* Either PFC bit is not set
		 * or Tx scheduler is already mapped for the priority
		 */
		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev,
				"%s failed to allocate PFC tx schedulers for priority: %d\n",
				__func__, prio);
			return err;
		}

update_sq_smq_map:
		err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev, "%s failed to update PFC Tx schq to SQ:%d mapping\n",
				__func__, prio);
			return err;
		}
	}

	err = otx2_pfc_txschq_config(pfvf);
	mutex_unlock(&mbox->lock);
	if (err)
		return err;

	return 0;
}

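/* Free the dedicated PFC Tx schedulers of every priority that has one. */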
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Delete the existing scheduler */
		err = otx2_pfc_txschq_stop_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}

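/* Send the PFC configuration (pfc_en bitmap plus Rx/Tx pause settings)
 * to the AF over the mailbox and check the response to confirm it was
 * applied.
 */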
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
	struct cgx_pfc_cfg *req;
	struct cgx_pfc_rsp *rsp;
	int err = 0;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	if (pfvf->pfc_en) {
		req->rx_pause = true;
		req->tx_pause = true;
	} else {
		req->rx_pause = false;
		req->tx_pause = false;
	}
	req->pfc_en = pfvf->pfc_en;

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pfc_rsp *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp)) {
			err = PTR_ERR(rsp);
			goto unlock;
		}

		if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
			dev_warn(pfvf->dev,
				 "Failed to config PFC\n");
			err = -EPERM;
		}
	}
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

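/* Map a VLAN priority to a receive queue by programming the matching
 * backpressure ID (BPID) into the queue's CQ context and Aura context.
 */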
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
			       bool pfc_enable)
{
	bool if_up = netif_running(pfvf->netdev);
	struct npa_aq_enq_req *npa_aq;
	struct nix_aq_enq_req *aq;
	int err = 0;

	if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
		dev_warn(pfvf->dev,
			 "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
			 pfvf->queue_to_pfc_map[qidx], qidx);
		return;
	}

	if (if_up) {
		netif_tx_stop_all_queues(pfvf->netdev);
		netif_carrier_off(pfvf->netdev);
	}

	pfvf->queue_to_pfc_map[qidx] = vlan_prio;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq) {
		err = -ENOMEM;
		goto out;
	}

	aq->cq.bpid = pfvf->bpid[vlan_prio];
	aq->cq_mask.bpid = GENMASK(8, 0);

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	otx2_sync_mbox_msg(&pfvf->mbox);

	npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!npa_aq) {
		err = -ENOMEM;
		goto out;
	}
	npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
	npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);

	/* Fill NPA AQ info */
	npa_aq->aura_id = qidx;
	npa_aq->ctype = NPA_AQ_CTYPE_AURA;
	npa_aq->op = NPA_AQ_INSTOP_WRITE;
	otx2_sync_mbox_msg(&pfvf->mbox);

out:
	if (if_up) {
		netif_carrier_on(pfvf->netdev);
		netif_tx_start_all_queues(pfvf->netdev);
	}

	if (err)
		dev_warn(pfvf->dev,
			 "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
			 qidx, err);
}

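/* dcbnl hook: report the PFC capability and currently enabled priorities. */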
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
	pfc->pfc_en = pfvf->pfc_en;

	return 0;
}

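/* dcbnl hook: validate and apply a new PFC configuration: MAC flow
 * control, per-channel backpressure and the PFC Tx scheduler trees.
 */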
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	int err;

	/* Save PFC configuration to interface */
	pfvf->pfc_en = pfc->pfc_en;

	if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
		goto process_pfc;

	/* Check if the PFC configuration can be
	 * supported by the tx queue configuration
	 */
	err = otx2_check_pfc_config(pfvf);
	if (err)
		return err;

process_pfc:
	err = otx2_config_priority_flow_ctrl(pfvf);
	if (err)
		return err;

	/* Request per-channel BPIDs */
	if (pfc->pfc_en)
		otx2_nix_config_bp(pfvf, true);

	err = otx2_pfc_txschq_update(pfvf);
	if (err) {
		dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
		return err;
	}

	return 0;
}

static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}

static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
{
	return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}

static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
	.ieee_getpfc	= otx2_dcbnl_ieee_getpfc,
	.ieee_setpfc	= otx2_dcbnl_ieee_setpfc,
	.getdcbx	= otx2_dcbnl_getdcbx,
	.setdcbx	= otx2_dcbnl_setdcbx,
};

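/* Allocate the per-queue PFC priority map and register the DCB netlink
 * ops for this netdev.
 */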
int otx2_dcbnl_set_ops(struct net_device *dev)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
					      GFP_KERNEL);
	if (!pfvf->queue_to_pfc_map)
		return -ENOMEM;
	dev->dcbnl_ops = &otx2_dcbnl_ops;

	return 0;
}