// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"
#include "rvu_trace.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

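/* Generate otx2_mbox_alloc_msg_<fn_name>() helpers for every AF-to-PF
 * (uplink) CGX mbox message listed in MBOX_UP_CGX_MESSAGES. Each helper
 * reserves a slot in the up mbox region and fills in the message header.
 */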
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one PF is mapped to a CGX LMAC port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

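/* Pack a CGX and LMAC id pair into a single byte: CGX id in the upper
 * nibble, LMAC id in the lower nibble.
 */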
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

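/* Returns the cgx driver data for the given CGX id, or NULL if the id is
 * out of range or no CGX device is present at that index.
 */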
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

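/* Build the PF <-> CGX LMAC mappings: fill pf2cgxlmac_map and its reverse
 * cgxlmac2pf_map, and allocate an NPC pkind for every mapped LMAC,
 * recording the PF's channel in the pkind to PF-channel map.
 */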
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int cgx, lmac_cnt, lmac;
	int pf = PF_CGXMAP_BASE;
	int size, free_pkind;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu->cgx_mapped_pfs++;
		}
	}
	return 0;
}

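/* Read the current link status of a CGX LMAC and queue it as a link event
 * so that the event worker forwards it to the mapped PF.
 */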
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

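/* Forward a link change event to every PF mapped to the CGX LMAC that has
 * link notifications enabled; for the rest, just log the new link state.
 */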
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

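/* Workqueue handler: drain the CGX event queue and notify the mapped PFs
 * of each queued link change.
 */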
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

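/* Set up the CGX link event infrastructure: the event queue, the event
 * workqueue and the link change callback registered with every LMAC of
 * every CGX device.
 */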
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed\n");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

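/* Discover CGX devices, map their LMACs to RVU PFs, register for link
 * change events and kick off link up on all CGX ports.
 */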
int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed before we turn on
	 * the links.
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

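/* Teardown counterpart of rvu_cgx_init(): unregister all LMAC event
 * handlers and destroy the event workqueue.
 */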
int rvu_cgx_exit(struct rvu *rvu)
{
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This function checks
 * whether the given pcifunc is permitted to do the config or not.
 */
static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

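/* Enable/disable CGX receive-side backpressure handling for the CGX LMAC
 * mapped to this PF by controlling pause frame forwarding to NIX.
 */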
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

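/* Start or stop packet Rx/Tx on the CGX LMAC mapped to this pcifunc. Only
 * the mapped PF itself is permitted to do this.
 */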
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

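/* Collect Rx and Tx MAC statistics of the CGX LMAC mapped to the
 * requesting PF and return them in the mbox response.
 */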
int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);

	/* Rx stats */
	while (stat < CGX_RX_STATS_COUNT) {
		err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		rsp->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < CGX_TX_STATS_COUNT) {
		err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		rsp->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

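/* Enable/disable PTP hardware timestamping on the mapped CGX LMAC and let
 * NPC know, so that packet parsing accounts for the extra 8 bytes of
 * timestamp data.
 */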
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	cgx_lmac_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

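/* Enable or disable link event notifications for the mapped PF. When
 * enabling, the current link status is pushed to the PF right away.
 */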
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

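/* Enable or disable internal loopback on the CGX LMAC mapped to this
 * pcifunc.
 */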
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (req->set)
		cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				       req->tx_pause, req->rx_pause);
	else
		cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				       &rsp->tx_pause, &rsp->rx_pause);
	return 0;
}

/* Finds the cumulative value of NIX Rx/Tx counters from the LFs of a PF and
 * those of its VFs as well, i.e. NIX Rx/Tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes the LFs of a PF and all of its VFs belong to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			 ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

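/* Reference-count NIXLF start/stop requests from a PF and its VFs so that
 * CGX Rx/Tx is enabled when the first NIXLF starts and disabled only when
 * the last one stops.
 */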
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all the NIXLFs is started.
	 * Stop CGX when the last of all the NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}