// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

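/* Expanding MBOX_UP_CGX_MESSAGES through the M() macro above generates one
 * otx2_mbox_alloc_msg_<name>() helper per AF-to-PF CGX notification, e.g.
 * otx2_mbox_alloc_msg_cgx_link_event() used by cgx_notify_pfs() below.
 */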
MBOX_UP_CGX_MESSAGES
#undef M

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one PF is mapped to a CGX LMAC port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

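/* Pack a (cgx, lmac) pair into a single byte: CGX id in the upper nibble,
 * LMAC id in the lower nibble, e.g. cgx 1 / lmac 2 becomes 0x12. The value
 * 0xFF marks an unmapped entry in pf2cgxlmac_map (see rvu_map_cgx_lmac_pf()).
 */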
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int cgx, lmac_cnt, lmac;
	int pf = PF_CGXMAP_BASE;
	int size, free_pkind;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			/* Reserve a free pkind for this LMAC and note the
			 * PF it is mapped to.
			 */
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu->cgx_mapped_pfs++;
		}
	}
	return 0;
}

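/* Read the current link state of a CGX LMAC and queue it for the event
 * worker so the mapped PF receives a link notification; used when a PF
 * (re)enables link events (see rvu_cgx_config_linkevents()).
 */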
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

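/* Notify every PF mapped to this CGX LMAC about a link state change. PFs
 * that have not enabled link event notifications only get the new state
 * logged; the rest are notified over the AF-to-PF mailbox.
 */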
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

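/* Work handler: drain the CGX event queue and forward each queued link
 * event to the PFs mapped to the reporting LMAC.
 */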
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed\n");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port IDs start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port ID value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This function
 * checks whether a given PFFUNC is permitted to do the configuration.
 */
static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

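/* Allow or block forwarding of received pause frames to NIX for a CGX
 * mapped PF (see the CTL_BCK note below).
 */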
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);

	/* Rx stats */
	while (stat < CGX_RX_STATS_COUNT) {
		err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		rsp->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < CGX_TX_STATS_COUNT) {
		err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		rsp->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from any other PF/VF there is nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (req->set)
		cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				       req->tx_pause, req->rx_pause);
	else
		cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				       &rsp->tx_pause, &rsp->rx_pause);
	return 0;
}

/* Finds the cumulative value of NIX RX/TX counters from the LF of a PF and
 * those from its VFs as well, i.e. the NIX RX/TX counters at the CGX port
 * level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes that the LF of a PF and all of its VFs belong to the same
	 * NIX block.
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			 ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

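/* Start/stop CGX RX/TX on behalf of a PF or VF. The parent PF keeps a count
 * of NIXLF users on the link: the physical CGX LMAC is enabled when the
 * first user starts and disabled when the last user stops.
 */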
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all NIXLFs is started.
	 * Stop CGX when the last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}