// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

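/* For every message id listed in MBOX_UP_CGX_MESSAGES, the M() macro below
 * expands to an otx2_mbox_alloc_msg_<name>() helper that allocates a request
 * in the AF->PF "up" mailbox and fills in the common message header.
 */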
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

#define CGX_OFFSET(x)			((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one PF is mapped to a CGX LMAC port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}

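/* Each pf2cgxlmac_map[] entry packs the CGX id into the upper nibble and the
 * LMAC id into the lower nibble; rvu_get_cgx_lmac_id() is the inverse. For
 * example (hypothetical ids), cgx_id 1 and lmac_id 2 are stored as 0x12.
 */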
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity, find the mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

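/* Walk every LMAC of every probed CGX and assign RVU PFs to them
 * sequentially, starting from PF_CGXMAP_BASE. Both the forward
 * (pf2cgxlmac_map) and reverse (cgxlmac2pf_map) tables are filled and a
 * packet kind (pkind) is reserved for each mapped LMAC.
 */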
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map =
		devm_kzalloc(rvu->dev,
			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
			     GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}

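/* Read the current link state of a CGX LMAC and queue it for the event
 * worker, so a PF that has just enabled link notifications receives the
 * present status without waiting for the next link change interrupt.
 */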
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

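/* Forward a link change event to every PF mapped to this CGX LMAC. PFs that
 * have not enabled notifications (pf_notify_bmap) only get a kernel log
 * line; the others are sent a cgx_link_event up-mailbox message.
 */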
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

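/* Workqueue handler: drain the event list built by cgx_lmac_postevent() and
 * rvu_cgx_send_link_info(), notifying the mapped PFs for each entry.
 */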
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

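/* Probe-time CGX setup: cache per-CGX driver data, map LMACs to PFs,
 * register for link change events and then kick off link-up on every port.
 */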
int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This function checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, enable);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

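/* Remove every DMAC filter installed for the LMAC mapped to this pcifunc.
 * With an NPC exact match table the entries live there instead; otherwise
 * the per-LMAC CGX filters are deleted one by one and index 0 is reset.
 */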
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);

	mac_ops = get_mac_ops(cgx_dev);
	if (!mac_ops)
		return;

	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* cgx_lmac_addr_del() does not clear the entry at index 0,
	 * so it needs to be done explicitly
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

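/* Common helper for the cgx_stats and rpm_stats mailbox handlers. The rsp
 * pointer is cast to the CGX or RPM response layout based on the MAC type's
 * RX/TX stat counts reported in mac_ops.
 */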
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);
	return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If the msg is received from PFs which are not mapped to CGX LMACs,
	 * or from a VF, then no DMAC filter entries are allocated at the CGX
	 * level, so return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* Copy the 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Re-enable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

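/* Enable/disable PTP hardware timestamping on the LMAC and update the NPC
 * KPU action so the parser accounts for the 8-byte timestamp prepended to
 * received packets while PTP is on.
 */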
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes;
	 * if PTP is disabled then no shift is required.
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if the app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
	struct mac_ops *mac_ops;
	void *cgxd;

	cgxd = rvu_cgx_pdata(cgx, rvu);
	if (!cgxd)
		return 0;

	mac_ops = get_mac_ops(cgxd);
	if (!mac_ops->lmac_fifo_len)
		return 0;

	return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

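/* Configure 802.3x pause frames on the LMAC. This is rejected while PFC is
 * active on the same LMAC, since the two flow control modes are mutually
 * exclusive (see the mirror check in rvu_cgx_prio_flow_ctrl_cfg()).
 */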
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Cannot configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Find the cumulative sum of NIX rx/tx counters from the LFs of a PF and
 * those of its VFs as well, i.e. the NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes the LFs of a PF and all of its VFs belong to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if an LF is attached to this PF or one of its VFs */
		if ((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) !=
		    (pcifunc & ~RVU_PFVF_FUNC_MASK))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

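/* Reference-count NIX LF users on the parent PF so the CGX LMAC rx/tx is
 * enabled only when the first NIXLF starts and disabled only when the last
 * NIXLF stops; intermediate start/stop calls just adjust cgx_users.
 */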
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all NIXLFs is started.
	 * Stop CGX when the last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));
	else
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));

	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

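/* Configure priority flow control (PFC) on the LMAC. This is the mirror of
 * rvu_cgx_cfg_pause_frm(): PFC is refused while 802.3x pause frames are
 * enabled on the same LMAC.
 */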
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Cannot configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
					    struct cgx_pfc_cfg *req,
					    struct cgx_pfc_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
	int err;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
					 req->rx_pause, req->pfc_en);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
	return err;
}