// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME	"Marvell-CGX/RPM"
#define DRV_STRING      "Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
	[CGX_LINK_NONE] = 0,
	[CGX_LINK_10M] = 10,
	[CGX_LINK_100M] = 100,
	[CGX_LINK_1G] = 1000,
	[CGX_LINK_2HG] = 2500,
	[CGX_LINK_5G] = 5000,
	[CGX_LINK_10G] = 10000,
	[CGX_LINK_20G] = 20000,
	[CGX_LINK_25G] = 25000,
	[CGX_LINK_40G] = 40000,
	[CGX_LINK_50G] = 50000,
	[CGX_LINK_80G] = 80000,
	[CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
	[LMAC_MODE_SGMII] = "SGMII",
	[LMAC_MODE_XAUI] = "XAUI",
	[LMAC_MODE_RXAUI] = "RXAUI",
	[LMAC_MODE_10G_R] = "10G_R",
	[LMAC_MODE_40G_R] = "40G_R",
	[LMAC_MODE_QSGMII] = "QSGMII",
	[LMAC_MODE_25G_R] = "25G_R",
	[LMAC_MODE_50G_R] = "50G_R",
	[LMAC_MODE_100G_R] = "100G_R",
	[LMAC_MODE_USXGMII] = "USXGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
	{ 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
	if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
		return false;
	return test_bit(lmac_id, &cgx->lmac_bmap);
}

struct mac_ops *get_mac_ops(void *cgxd)
{
	if (!cgxd)
		return cgxd;

	return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
	       offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
		     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

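/* Return the number of CGX devices to be managed: cgx_id values can be
 * sparse, so this is the highest cgx_id found on the list plus one, or 0 if
 * no CGX device has been probed yet.
 */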
int cgx_get_cgxcnt_max(void)
{
	struct cgx *cgx_dev;
	int idmax = -ENODEV;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		if (cgx_dev->cgx_id > idmax)
			idmax = cgx_dev->cgx_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -EINVAL;

	return cgx->cgx_id;
}

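/* Return the P2X (packet interface) select field from the CMR config
 * register for this LMAC.
 */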
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is acquired before calling this API. Otherwise an asynchronous
 * event (with the latest link status) can reach the destination before this
 * function returns and make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}

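/* Pack a 6-byte MAC address into a u64, with the first address byte in the
 * most significant position, matching the DMAC CAM register layout.
 */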
static u64 mac2u64(u8 *mac_addr)
{
	u64 mac = 0;
	int index;

	for (index = ETH_ALEN - 1; index >= 0; index--)
		mac |= ((u64)*mac_addr++) << (8 * index);
	return mac;
}

int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	u64 cfg;

	mac_ops = cgx_dev->mac_ops;
	/* Pack the 6-byte MAC address into the DMAC CAM entry format */
	cfg = mac2u64(mac_addr);

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	u64 cfg;

	mac_ops = cgx_dev->mac_ops;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
	return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 lmac_type;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	u64 cfg = 0;

	if (!cgx)
		return;

	mac_ops = cgx->mac_ops;
	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
		cfg |= CGX_DMAC_BCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	}
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (enable) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
	return 0;
}

u64 cgx_features_get(void *cgxd)
{
	return ((struct cgx *)cgxd)->hw_features;
}

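/* Return how many per-lane FEC counter pairs are valid for the current LMAC
 * mode and FEC setting (0 when FEC is off or the mode has no FEC counters).
 */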
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
	if (!linfo->fec)
		return 0;

	switch (linfo->lmac_type_id) {
	case LMAC_MODE_SGMII:
	case LMAC_MODE_XAUI:
	case LMAC_MODE_RXAUI:
	case LMAC_MODE_QSGMII:
		return 0;
	case LMAC_MODE_10G_R:
	case LMAC_MODE_25G_R:
	case LMAC_MODE_100G_R:
	case LMAC_MODE_USXGMII:
		return 1;
	case LMAC_MODE_40G_R:
		return 4;
	case LMAC_MODE_50G_R:
		if (linfo->fec == OTX2_FEC_BASER)
			return 2;
		else
			return 1;
	default:
		return 0;
	}
}

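/* Accumulate corrected/uncorrected FEC block counters for an LMAC, picking
 * the BASE-R or RS-FEC registers based on the FEC mode currently in use.
 */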
int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
	int stats, fec_stats_count = 0;
	int corr_reg, uncorr_reg;
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	fec_stats_count =
		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
	} else {
		corr_reg = CGXX_SPUX_RSFEC_CORR;
		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
	}
	for (stats = 0; stats < fec_stats_count; stats++) {
		rsp->fec_corr_blks +=
			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
		rsp->fec_uncorr_blks +=
			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
	}
	return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}

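/* Enable or disable packet transmission on an LMAC and return the previous
 * TX enable state so callers can restore it later.
 */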
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg, last;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	last = cfg;
	if (enable)
		cfg |= DATA_PKT_TX_EN;
	else
		cfg &= ~DATA_PKT_TX_EN;

	if (cfg != last)
		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
					 u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
				     u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}

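/* Set the default 802.3x pause frame behaviour for an LMAC: when enabled,
 * honour received pause frames, allow pause frame transmission and program
 * the default pause time/interval; when disabled, ignore received pause
 * frames and stop generating them.
 */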
static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return;
	if (enable) {
		/* Enable receive pause frames */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Enable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

		/* Set pause time and interval */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	} else {
		/* All received pause frames are completely ignored */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Disable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
	}
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (is_dev_rpm(cgx))
		return;

	if (enable) {
		/* Enable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		/* Disable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

/* CGX Firmware interface low level support */
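/* Send a command to firmware over the per-LMAC command register and wait for
 * the response: the caller serializes on cmd_lock, ownership of the register
 * is handed to firmware via the CMDREG_OWN field, and completion is signalled
 * from the interrupt handler through wq_cmd_cmplt (or -EIO on timeout).
 */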
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* We have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err) {
		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
			return -EIO;
		else
			return 0;
	}

	return err;
}

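/* Map an ethtool SPEED_* value (in Mbps) to the firmware CGX_LINK_* speed
 * encoding used in mode change requests; unknown speeds map to CGX_LINK_NONE.
 */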
static int cgx_link_usertable_index_map(int speed)
{
	switch (speed) {
	case SPEED_10:
		return CGX_LINK_10M;
	case SPEED_100:
		return CGX_LINK_100M;
	case SPEED_1000:
		return CGX_LINK_1G;
	case SPEED_2500:
		return CGX_LINK_2HG;
	case SPEED_5000:
		return CGX_LINK_5G;
	case SPEED_10000:
		return CGX_LINK_10G;
	case SPEED_20000:
		return CGX_LINK_20G;
	case SPEED_25000:
		return CGX_LINK_25G;
	case SPEED_40000:
		return CGX_LINK_40G;
	case SPEED_50000:
		return CGX_LINK_50G;
	case 80000:
		return CGX_LINK_80G;
	case SPEED_100000:
		return CGX_LINK_100G;
	case SPEED_UNKNOWN:
		return CGX_LINK_NONE;
	}
	return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
	/* Fill in default values in case the user did not pass
	 * valid parameters
	 */
	if (args->duplex == DUPLEX_UNKNOWN)
		args->duplex = duplex;
	if (args->speed == SPEED_UNKNOWN)
		args->speed = speed;
	if (args->an == AUTONEG_UNKNOWN)
		args->an = autoneg;
	args->mode = mode;
	args->ports = 0;
}

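/* Translate a single ethtool link mode bit into the speed/duplex/autoneg and
 * firmware mode flag used by CGX_CMD_MODE_CHANGE.
 */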
static void otx2_map_ethtool_link_modes(u64 bitmask,
					struct cgx_set_link_mode_args *args)
{
	switch (bitmask) {
	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
		set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
		break;
	case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
		break;
	case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
		break;
	case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
		break;
	case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
		set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
		break;
	case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
		set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
		break;
	case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
		break;
	case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
		break;
	case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
		break;
	case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
		break;
	case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
		break;
	case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
		break;
	case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
		break;
	case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
		break;
	case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
		break;
	case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
		break;
	case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
		break;
	case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
		break;
	case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
		break;
	case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
		break;
	default:
		set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
		break;
	}
}

static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	const char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* Update the local copy of the link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
		return;

	/* Ensure callback doesn't get unregistered until we finish it */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null\n",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN ||
	    id == CGX_CMD_MODE_CHANGE)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

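/* Firmware interrupt handler: reads the per-LMAC event register, delivers
 * command responses to the waiting thread, forwards link change events, and
 * then acks both the event register and the interrupt so firmware can post
 * the next event.
 */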
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	u64 event, offset, clear_bit;
	struct lmac *lmac = data;
	struct cgx *cgx;

	cgx = lmac->cgx;

	/* Clear SW_INT for RPM and CMR_INT for CGX */
	offset     = cgx->mac_ops->int_register;
	clear_bit  = cgx->mac_ops->int_ena_bit;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link changes initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

	return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
	struct lmac *lmac;
	unsigned long flags;
	struct cgx *cgx = cgxd;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

	return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int first_lmac;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
		      int cgx_id, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	if (args.mode)
		otx2_map_ethtool_link_modes(args.mode, &args);
	if (!args.speed && args.duplex && !args.an)
		return -EINVAL;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
	req = FIELD_SET(CMDMODECHANGE_SPEED,
			cgx_link_usertable_index_map(args.speed), req);
	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err = 0;

	cgx = cgx_get_pdata(cgx_id);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
	req = FIELD_SET(CMDSETFEC, fec, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
	if (err)
		return err;

	cgx->lmac_idmap[lmac_id]->link_info.fec =
			FIELD_GET(RESP_LINKSTAT_FEC, resp);
	return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 req = 0;
	u64 resp;

	if (enable)
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
	else
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);
	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
		return -EIO;
	else
		return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
	struct device *dev = &cgx->pdev->dev;
	int i, err;

	/* Do Link up for all the enabled lmacs */
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		err = cgx_fwi_link_change(cgx, i, true);
		if (err)
			dev_info(dev, "cgx port %d:%d Link up command failed\n",
				 cgx->cgx_id, i);
	}
}

int cgx_lmac_linkup_start(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

	return 0;
}

static void cgx_lmac_get_fifolen(struct cgx *cgx)
{
	u64 cfg;

	cfg = cgx_read(cgx, 0, CGX_CONST);
	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
}

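/* Request or free the firmware interface IRQ for an LMAC; when requesting,
 * the corresponding interrupt enable bit is also set in hardware.
 */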
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
				   int cnt, bool req_free)
{
	struct mac_ops *mac_ops = cgx->mac_ops;
	u64 offset, ena_bit;
	unsigned int irq;
	int err;

	irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
				  cnt * mac_ops->irq_offset);
	offset   = mac_ops->int_set_reg;
	ena_bit  = mac_ops->int_ena_bit;

	if (req_free) {
		free_irq(irq, lmac);
		return 0;
	}

	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
	if (err)
		return err;

	/* Enable interrupt */
	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
	return 0;
}

int cgx_get_nr_lmacs(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
}

u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_idmap[lmac_index]->lmac_id;
}

unsigned long cgx_get_lmac_bmap(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_bmap;
}

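/* Discover the enabled LMACs on this CGX/RPM block, allocate per-LMAC state,
 * set up the firmware interface interrupt and default pause configuration for
 * each one, and finally verify the firmware interface version.
 */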
static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	u64 lmac_list;
	int i, err;

	cgx_lmac_get_fifolen(cgx);

	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
	/* lmac_list specifies which lmacs are enabled
	 * when bit n is set to 1, LMAC[n] is enabled
	 */
	if (cgx->mac_ops->non_contiguous_serdes_lane)
		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;

	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name) {
			err = -ENOMEM;
			goto err_lmac_free;
		}
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		if (cgx->mac_ops->non_contiguous_serdes_lane) {
			lmac->lmac_id = __ffs64(lmac_list);
			lmac_list   &= ~BIT_ULL(lmac->lmac_id);
		} else {
			lmac->lmac_id = i;
		}

		lmac->cgx = cgx;
		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		spin_lock_init(&lmac->event_cb_lock);
		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
		if (err)
			goto err_irq;

		/* Add reference */
		cgx->lmac_idmap[lmac->lmac_id] = lmac;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
	}

	return cgx_lmac_verify_fwi_version(cgx);

err_irq:
	kfree(lmac->name);
err_lmac_free:
	kfree(lmac);
	return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		flush_workqueue(cgx->cgx_cmd_workq);
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

	/* Free all lmac related resources */
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static void cgx_populate_features(struct cgx *cgx)
{
	if (is_dev_rpm(cgx))
		cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
	else
		cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
}

static struct mac_ops	cgx_mac_ops = {
	.name		=	"cgx",
	.csr_offset	=	0,
	.lmac_offset	=	18,
	.int_register	=	CGXX_CMRX_INT,
	.int_set_reg	=	CGXX_CMRX_INT_ENA_W1S,
	.irq_offset	=	9,
	.int_ena_bit	=	FW_CGX_INT,
	.lmac_fwi	=	CGX_LMAC_FWI,
	.non_contiguous_serdes_lane = false,
	.rx_stats_cnt	=	9,
	.tx_stats_cnt	=	18,
	.get_nr_lmacs	=	cgx_get_nr_lmacs,
	.get_lmac_type	=	cgx_get_lmac_type,
	.mac_lmac_intl_lbk =	cgx_lmac_internal_loopback,
	.mac_get_rx_stats  =	cgx_get_rx_stats,
	.mac_get_tx_stats  =	cgx_get_tx_stats,
	.mac_enadis_rx_pause_fwding =	cgx_lmac_enadis_rx_pause_fwding,
	.mac_get_pause_frm_status =	cgx_lmac_get_pause_frm_status,
	.mac_enadis_pause_frm =		cgx_lmac_enadis_pause_frm,
	.mac_pause_frm_config =		cgx_lmac_pause_frm_config,
};

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	/* Use mac_ops to get MAC specific features */
	if (pdev->device == PCI_DEVID_CN10K_RPM)
		cgx->mac_ops = rpm_get_mac_ops();
	else
		cgx->mac_ops = &cgx_mac_ops;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = pci_msix_vec_count(cgx->pdev);
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	/* init wq for processing linkup requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_populate_features(cgx);

	mutex_init(&cgx->lock);

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	if (cgx) {
		cgx_lmac_exit(cgx);
		list_del(&cgx->cgx_list);
	}
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};