// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include "otx2_common.h"

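/* PFC maps priority 'n' to Tx queue 'n', so every priority enabled in
 * pfc_en must have a Tx queue of the same index available.
 */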
static int otx2_check_pfc_config(struct otx2_nic *pfvf)
{
	u8 tx_queues = pfvf->hw.tx_queues, prio;
	u8 pfc_en = pfvf->pfc_en;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		if ((pfc_en & (1 << prio)) &&
		    prio > tx_queues - 1) {
			dev_warn(pfvf->dev,
				 "Increase number of tx queues from %d to %d to support PFC.\n",
				 tx_queues, prio + 1);
			return -EINVAL;
		}
	}

	return 0;
}

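/* Program every scheduler level of the Tx scheduler tree that was
 * allocated for each PFC enabled priority.
 */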
int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, lvl, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* Either PFC bit is not set
		 * or tx scheduler is not allocated for the priority
		 */
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Configure the scheduler at each TL level for this priority */
		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			err = otx2_txschq_config(pfvf, lvl, prio, true);
			if (err) {
				dev_err(pfvf->dev,
					"%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
					__func__, lvl, prio);
				return err;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_config);

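/* Allocate a dedicated Tx scheduler tree for one PFC priority: one schq
 * per level up to the link config level, default schqs for the rest.
 */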
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int lvl, rc;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level, up to the configured link config
	 * level. The remaining levels can be shared with hw.txschq_list.
	 */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
		req->schq[lvl] = 1;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		return rc;

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
		if (!rsp->schq[lvl])
			return -ENOSPC;

		pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
	}

	/* Set the Tx schedulers for rest of the levels same as
	 * hw.txschq_list as those will be common for all.
	 */
	for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];

	pfvf->pfc_alloc_status[prio] = true;
	return 0;
}

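/* Allocate per-priority Tx scheduler trees for all PFC enabled priorities
 * that do not have one yet.
 */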
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
{
	u8 pfc_en = pfvf->pfc_en;
	u8 pfc_bit_set;
	int err, prio;

	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_alloc);

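/* Free the per-priority scheduler queues up to the link config level and
 * mark the priority as unallocated.
 */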
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
	int lvl;

	/* free PFC TLx nodes */
	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
		otx2_txschq_free_one(pfvf, lvl,
				     pfvf->pfc_schq_list[lvl][prio]);

	pfvf->pfc_alloc_status[prio] = false;
	return 0;
}

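/* Re-attach SQ 'prio' to the SMQ returned by otx2_get_smq_idx() with an
 * AQ WRITE; Tx queues are paused while the SQ context is rewritten.
 */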
static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
{
	struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
	struct net_device *dev = pfvf->netdev;
	bool if_up = netif_running(dev);
	struct nix_aq_enq_req *sq_aq;

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_stop_all_queues(pfvf->netdev);
		else
			netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
	}

	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		cn10k_sq_aq->qidx = prio;
		cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		cn10k_sq_aq->sq.ena = 1;
		cn10k_sq_aq->sq_mask.ena = 1;
		cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
		cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	} else {
		sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!sq_aq)
			return -ENOMEM;

		/* Fill AQ info */
		sq_aq->qidx = prio;
		sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		sq_aq->op = NIX_AQ_INSTOP_WRITE;

		/* Fill fields to update */
		sq_aq->sq.ena = 1;
		sq_aq->sq_mask.ena = 1;
		sq_aq->sq_mask.smq = GENMASK(8, 0);
		sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
	}

	otx2_sync_mbox_msg(&pfvf->mbox);

	if (if_up) {
		if (pfvf->pfc_alloc_status[prio])
			netif_tx_start_all_queues(pfvf->netdev);
		else
			netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
	}

	return 0;
}

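/* Bring the PFC Tx scheduler trees in sync with the current pfc_en bitmap:
 * tear down trees for priorities that were disabled, allocate trees for
 * newly enabled ones, remap the affected SQs and reprogram the schedulers.
 */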
int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
{
	bool if_up = netif_running(pfvf->netdev);
	u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
	struct mbox *mbox = &pfvf->mbox;
	int err, prio;

	mutex_lock(&mbox->lock);
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);

		/* tx scheduler was created but user wants to disable now */
		if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
			mutex_unlock(&mbox->lock);
			if (if_up)
				netif_tx_stop_all_queues(pfvf->netdev);

			otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
			if (if_up)
				netif_tx_start_all_queues(pfvf->netdev);

			/* delete the schq */
			err = otx2_pfc_txschq_stop_one(pfvf, prio);
			if (err) {
				dev_err(pfvf->dev,
					"%s failed to stop PFC tx schedulers for priority: %d\n",
					__func__, prio);
				return err;
			}

			mutex_lock(&mbox->lock);
			goto update_sq_smq_map;
		}

		/* Either PFC bit is not set
		 * or Tx scheduler is already mapped for the priority
		 */
		if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
			continue;

		/* Add new scheduler to the priority */
		err = otx2_pfc_txschq_alloc_one(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev,
				"%s failed to allocate PFC tx schedulers for priority: %d\n",
				__func__, prio);
			return err;
		}

update_sq_smq_map:
		err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
		if (err) {
			mutex_unlock(&mbox->lock);
			dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping\n", __func__, prio);
			return err;
		}
	}

	err = otx2_pfc_txschq_config(pfvf);
	mutex_unlock(&mbox->lock);
	if (err)
		return err;

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_update);

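/* Free the Tx scheduler trees of all priorities that still have one
 * allocated.
 */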
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
	u8 pfc_en, pfc_bit_set;
	int prio, err;

	pfc_en = pfvf->pfc_en;
	for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
		pfc_bit_set = pfc_en & (1 << prio);
		if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
			continue;

		/* Delete the existing scheduler */
		err = otx2_pfc_txschq_stop_one(pfvf, prio);
		if (err) {
			dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_stop);

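/* Push the PFC configuration to the MAC through the cgx_prio_flow_ctrl_cfg
 * mailbox message and verify the response. Loopback (LBK) VFs have no MAC
 * link, so there is nothing to configure for them.
 */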
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
	struct cgx_pfc_cfg *req;
	struct cgx_pfc_rsp *rsp;
	int err = 0;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	if (pfvf->pfc_en) {
		req->rx_pause = true;
		req->tx_pause = true;
	} else {
		req->rx_pause = false;
		req->tx_pause = false;
	}
	req->pfc_en = pfvf->pfc_en;

	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
		rsp = (struct cgx_pfc_rsp *)
		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp)) {
			err = PTR_ERR(rsp);
			goto unlock;
		}

		if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
			dev_warn(pfvf->dev,
				 "Failed to config PFC\n");
			err = -EPERM;
		}
	}
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);

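/* Update the backpressure ID (BPID) for VLAN priority 'vlan_prio' in the
 * CQ and Aura contexts of receive queue 'qidx'.
 */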
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
			       bool pfc_enable)
{
	bool if_up = netif_running(pfvf->netdev);
	struct npa_aq_enq_req *npa_aq;
	struct nix_aq_enq_req *aq;
	int err = 0;

	if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
		dev_warn(pfvf->dev,
			 "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
			 pfvf->queue_to_pfc_map[qidx], qidx);
		return;
	}

	if (if_up) {
		netif_tx_stop_all_queues(pfvf->netdev);
		netif_carrier_off(pfvf->netdev);
	}

	pfvf->queue_to_pfc_map[qidx] = vlan_prio;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq) {
		err = -ENOMEM;
		goto out;
	}

	aq->cq.bpid = pfvf->bpid[vlan_prio];
	aq->cq_mask.bpid = GENMASK(8, 0);

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	otx2_sync_mbox_msg(&pfvf->mbox);

	npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!npa_aq) {
		err = -ENOMEM;
		goto out;
	}
	npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
	npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);

	/* Fill NPA AQ info */
	npa_aq->aura_id = qidx;
	npa_aq->ctype = NPA_AQ_CTYPE_AURA;
	npa_aq->op = NPA_AQ_INSTOP_WRITE;
	otx2_sync_mbox_msg(&pfvf->mbox);

out:
	if (if_up) {
		netif_carrier_on(pfvf->netdev);
		netif_tx_start_all_queues(pfvf->netdev);
	}

	if (err)
		dev_warn(pfvf->dev,
			 "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
			 qidx, err);
}
EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);

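/* DCB netlink: report the PFC capability (8 traffic classes) and the
 * currently enabled priority bitmap.
 */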
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
	pfc->pfc_en = pfvf->pfc_en;

	return 0;
}

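/* DCB netlink: apply a new pfc_en bitmap. Validate it against the Tx queue
 * count, program the MAC, request per-channel backpressure and rebuild the
 * per-priority Tx schedulers, rolling everything back on failure.
 */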
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct otx2_nic *pfvf = netdev_priv(dev);
	u8 old_pfc_en;
	int err;

	old_pfc_en = pfvf->pfc_en;
	pfvf->pfc_en = pfc->pfc_en;

	if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
		goto process_pfc;

	/* Check if the PFC configuration can be
	 * supported by the tx queue configuration
	 */
	err = otx2_check_pfc_config(pfvf);
	if (err) {
		pfvf->pfc_en = old_pfc_en;
		return err;
	}

process_pfc:
	err = otx2_config_priority_flow_ctrl(pfvf);
	if (err) {
		pfvf->pfc_en = old_pfc_en;
		return err;
	}

	/* Request Per channel Bpids */
	if (pfc->pfc_en)
		otx2_nix_config_bp(pfvf, true);

	err = otx2_pfc_txschq_update(pfvf);
	if (err) {
		if (pfc->pfc_en)
			otx2_nix_config_bp(pfvf, false);

		otx2_pfc_txschq_stop(pfvf);
		pfvf->pfc_en = old_pfc_en;
		otx2_config_priority_flow_ctrl(pfvf);
		dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
		return err;
	}

	return 0;
}

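/* Only host-managed IEEE DCBX is supported; reject any other mode. */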
static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}

static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
{
	return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}

static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
	.ieee_getpfc	= otx2_dcbnl_ieee_getpfc,
	.ieee_setpfc	= otx2_dcbnl_ieee_setpfc,
	.getdcbx	= otx2_dcbnl_getdcbx,
	.setdcbx	= otx2_dcbnl_setdcbx,
};

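/* Allocate the per-RQ priority map and attach the DCB netlink ops to the
 * netdev.
 */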
int otx2_dcbnl_set_ops(struct net_device *dev)
{
	struct otx2_nic *pfvf = netdev_priv(dev);

	pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
					      GFP_KERNEL);
	if (!pfvf->queue_to_pfc_map)
		return -ENOMEM;
	dev->dcbnl_ops = &otx2_dcbnl_ops;

	return 0;
}
EXPORT_SYMBOL(otx2_dcbnl_set_ops);