// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME	"Marvell-CGX/RPM"
#define DRV_STRING      "Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
	{ 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
	return cgx && test_bit(lmac_id, &cgx->lmac_bmap);
}

struct mac_ops *get_mac_ops(void *cgxd)
{
	if (!cgxd)
		return cgxd;

	return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
	       offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
		     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
	struct cgx *cgx_dev;
	int idmax = -ENODEV;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		if (cgx_dev->cgx_id > idmax)
			idmax = cgx_dev->cgx_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -EINVAL;

	return cgx->cgx_id;
}

u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is acquired before calling this API. Otherwise an asynchronous
 * event carrying the latest link status can reach the destination before this
 * function returns and make the reported link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}

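/* Pack a 6-byte MAC address into a u64, with mac_addr[0] landing in the most
 * significant byte (bits 47:40) and mac_addr[5] in bits 7:0, i.e. the layout
 * expected by the CGX DMAC CAM registers.
 */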
static u64 mac2u64(u8 *mac_addr)
{
	u64 mac = 0;
	int index;

	for (index = ETH_ALEN - 1; index >= 0; index--)
		mac |= ((u64)*mac_addr++) << (8 * index);
	return mac;
}

int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	u64 cfg;

	mac_ops = cgx_dev->mac_ops;
	/* Copy the 6 MAC address bytes into the CAM entry format */
	cfg = mac2u64(mac_addr);

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	u64 cfg;

	mac_ops = cgx_dev->mac_ops;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
	return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 lmac_type;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	u64 cfg = 0;

	if (!cgx)
		return;

	mac_ops = cgx->mac_ops;
	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
		cfg |= CGX_DMAC_BCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	}
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (enable) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
	return 0;
}

u64 cgx_features_get(void *cgxd)
{
	return ((struct cgx *)cgxd)->hw_features;
}

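/* Return the number of per-lane FEC block counters to accumulate for the
 * current LMAC mode; 0 when FEC is disabled or the mode does not use FEC.
 */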
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
	if (!linfo->fec)
		return 0;

	switch (linfo->lmac_type_id) {
	case LMAC_MODE_SGMII:
	case LMAC_MODE_XAUI:
	case LMAC_MODE_RXAUI:
	case LMAC_MODE_QSGMII:
		return 0;
	case LMAC_MODE_10G_R:
	case LMAC_MODE_25G_R:
	case LMAC_MODE_100G_R:
	case LMAC_MODE_USXGMII:
		return 1;
	case LMAC_MODE_40G_R:
		return 4;
	case LMAC_MODE_50G_R:
		if (linfo->fec == OTX2_FEC_BASER)
			return 2;
		else
			return 1;
	default:
		return 0;
	}
}

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
	int stats, fec_stats_count = 0;
	int corr_reg, uncorr_reg;
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	fec_stats_count =
		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
	} else {
		corr_reg = CGXX_SPUX_RSFEC_CORR;
		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
	}
	for (stats = 0; stats < fec_stats_count; stats++) {
		rsp->fec_corr_blks +=
			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
		rsp->fec_uncorr_blks +=
			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
	}
	return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}

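/* Enable or disable packet transmission on an LMAC. Returns the previous
 * TX enable state (1 if TX was enabled, 0 otherwise) so callers can restore
 * it later.
 */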
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg, last;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	last = cfg;
	if (enable)
		cfg |= DATA_PKT_TX_EN;
	else
		cfg &= ~DATA_PKT_TX_EN;

	if (cfg != last)
		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
					 u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
				     u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}

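/* Set up default 802.3x pause frame handling for an LMAC: when enabled,
 * accept received pause frames, allow pause frame transmission and program
 * the default pause time/interval; when disabled, ignore received pause
 * frames and stop transmitting them.
 */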
static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return;
	if (enable) {
		/* Enable receive pause frames */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Enable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

		/* Set pause time and interval */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	} else {
		/* ALL pause frames received are completely ignored */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Disable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
	}
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (is_dev_rpm(cgx))
		return;

	if (enable) {
		/* Enable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		/* Disable inbound PTP stamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

/* CGX Firmware interface low level support */
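/* Send one command to firmware over the per-LMAC command register and wait
 * for the response posted by the firmware event interrupt. Only one command
 * may be in flight per LMAC; the ownership field in the command register is
 * used as the hardware/firmware handshake. Callers typically build the
 * request with FIELD_SET(CMDREG_ID, <command>, 0) and decode fields from the
 * returned response word (see cgx_get_fwdata_base() for an example).
 */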
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* we have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err) {
		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
			return -EIO;
		else
			return 0;
	}

	return err;
}

static inline void cgx_link_usertable_init(void)
{
	cgx_speed_mbps[CGX_LINK_NONE] = 0;
	cgx_speed_mbps[CGX_LINK_10M] = 10;
	cgx_speed_mbps[CGX_LINK_100M] = 100;
	cgx_speed_mbps[CGX_LINK_1G] = 1000;
	cgx_speed_mbps[CGX_LINK_2HG] = 2500;
	cgx_speed_mbps[CGX_LINK_5G] = 5000;
	cgx_speed_mbps[CGX_LINK_10G] = 10000;
	cgx_speed_mbps[CGX_LINK_20G] = 20000;
	cgx_speed_mbps[CGX_LINK_25G] = 25000;
	cgx_speed_mbps[CGX_LINK_40G] = 40000;
	cgx_speed_mbps[CGX_LINK_50G] = 50000;
	cgx_speed_mbps[CGX_LINK_80G] = 80000;
	cgx_speed_mbps[CGX_LINK_100G] = 100000;

	cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
	cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
	cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
	cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
	cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
	cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
	cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
	cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
	cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
	cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}

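/* Map an ethtool SPEED_* value to the firmware CGX_LINK_* speed encoding */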
static int cgx_link_usertable_index_map(int speed)
{
	switch (speed) {
	case SPEED_10:
		return CGX_LINK_10M;
	case SPEED_100:
		return CGX_LINK_100M;
	case SPEED_1000:
		return CGX_LINK_1G;
	case SPEED_2500:
		return CGX_LINK_2HG;
	case SPEED_5000:
		return CGX_LINK_5G;
	case SPEED_10000:
		return CGX_LINK_10G;
	case SPEED_20000:
		return CGX_LINK_20G;
	case SPEED_25000:
		return CGX_LINK_25G;
	case SPEED_40000:
		return CGX_LINK_40G;
	case SPEED_50000:
		return CGX_LINK_50G;
	case 80000: /* ethtool.h has no SPEED_80000 define */
		return CGX_LINK_80G;
	case SPEED_100000:
		return CGX_LINK_100G;
	case SPEED_UNKNOWN:
		return CGX_LINK_NONE;
	}
	return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
	/* Fill in default values in case the user did not pass
	 * valid parameters
	 */
	if (args->duplex == DUPLEX_UNKNOWN)
		args->duplex = duplex;
	if (args->speed == SPEED_UNKNOWN)
		args->speed = speed;
	if (args->an == AUTONEG_UNKNOWN)
		args->an = autoneg;
	args->mode = mode;
	args->ports = 0;
}

static void otx2_map_ethtool_link_modes(u64 bitmask,
					struct cgx_set_link_mode_args *args)
{
	switch (bitmask) {
	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
		set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
		break;
	case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
		break;
	case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
		break;
	case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
		break;
	case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
		set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
		break;
	case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
		set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
		break;
	case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
		break;
	case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
		break;
	case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
		break;
	case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
		break;
	case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
		break;
	case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
		break;
	case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
		break;
	case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
		break;
	case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
		break;
	case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
		break;
	case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
		break;
	case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
		break;
	case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
		break;
	case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
		break;
	default:
		set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
		break;
	}
}

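/* Decode the firmware link status word into the user visible link info */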
static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
		return;

	/* Ensure callback doesn't get unregistered until we finish it */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN ||
	    id == CGX_CMD_MODE_CHANGE)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

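/* Firmware event interrupt handler: consumes command responses (waking the
 * thread blocked in cgx_fwi_cmd_send()) and asynchronous events such as link
 * changes, then acks the event and interrupt registers so firmware can post
 * the next one.
 */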
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	u64 event, offset, clear_bit;
	struct lmac *lmac = data;
	struct cgx *cgx;

	cgx = lmac->cgx;

	/* Clear SW_INT for RPM and CMR_INT for CGX */
	offset     = cgx->mac_ops->int_register;
	clear_bit  = cgx->mac_ops->int_ena_bit;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link changes initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

	return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
	struct lmac *lmac;
	unsigned long flags;
	struct cgx *cgx = cgxd;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

	return 0;
}

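/* Query firmware (via the first enabled LMAC) for the base address of the
 * shared firmware data region.
 */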
int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int first_lmac;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
		      int cgx_id, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	if (args.mode)
		otx2_map_ethtool_link_modes(args.mode, &args);
	if (!args.speed && args.duplex && !args.an)
		return -EINVAL;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
	req = FIELD_SET(CMDMODECHANGE_SPEED,
			cgx_link_usertable_index_map(args.speed), req);
	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err = 0;

	cgx = cgx_get_pdata(cgx_id);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
	req = FIELD_SET(CMDSETFEC, fec, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
	if (err)
		return err;

	cgx->lmac_idmap[lmac_id]->link_info.fec =
			FIELD_GET(RESP_LINKSTAT_FEC, resp);
	return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 req = 0;
	u64 resp;

	if (enable)
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
	else
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);
	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
		return -EIO;
	else
		return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
	struct device *dev = &cgx->pdev->dev;
	int i, err;

	/* Do Link up for all the enabled lmacs */
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		err = cgx_fwi_link_change(cgx, i, true);
		if (err)
			dev_info(dev, "cgx port %d:%d Link up command failed\n",
				 cgx->cgx_id, i);
	}
}

int cgx_lmac_linkup_start(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

	return 0;
}

static void cgx_lmac_get_fifolen(struct cgx *cgx)
{
	u64 cfg;

	cfg = cgx_read(cgx, 0, CGX_CONST);
	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
}

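/* Request (or free, when req_free is true) the firmware interface IRQ for an
 * LMAC and enable it in the MAC's interrupt enable register.
 */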
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
				   int cnt, bool req_free)
{
	struct mac_ops *mac_ops = cgx->mac_ops;
	u64 offset, ena_bit;
	unsigned int irq;
	int err;

	irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
				  cnt * mac_ops->irq_offset);
	offset   = mac_ops->int_set_reg;
	ena_bit  = mac_ops->int_ena_bit;

	if (req_free) {
		free_irq(irq, lmac);
		return 0;
	}

	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
	if (err)
		return err;

	/* Enable interrupt */
	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
	return 0;
}

int cgx_get_nr_lmacs(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
}

u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_idmap[lmac_index]->lmac_id;
}

unsigned long cgx_get_lmac_bmap(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_bmap;
}

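/* Discover and initialize all LMACs on this CGX/RPM: allocate per-LMAC state,
 * register the firmware interface IRQ, enable default pause frame handling,
 * record each LMAC in the id map and bitmap, then verify the firmware
 * interface version.
 */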
static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	u64 lmac_list;
	int i, err;

	cgx_lmac_get_fifolen(cgx);

	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
	/* lmac_list specifies which lmacs are enabled
	 * when bit n is set to 1, LMAC[n] is enabled
	 */
	if (cgx->mac_ops->non_contiguous_serdes_lane)
		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;

	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name) {
			err = -ENOMEM;
			goto err_lmac_free;
		}
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		if (cgx->mac_ops->non_contiguous_serdes_lane) {
			lmac->lmac_id = __ffs64(lmac_list);
			lmac_list   &= ~BIT_ULL(lmac->lmac_id);
		} else {
			lmac->lmac_id = i;
		}

		lmac->cgx = cgx;
		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		spin_lock_init(&lmac->event_cb_lock);
		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
		if (err)
			goto err_irq;

		/* Add reference */
		cgx->lmac_idmap[lmac->lmac_id] = lmac;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
	}

	return cgx_lmac_verify_fwi_version(cgx);

err_irq:
	kfree(lmac->name);
err_lmac_free:
	kfree(lmac);
	return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		flush_workqueue(cgx->cgx_cmd_workq);
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

	/* Free all lmac related resources */
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static void cgx_populate_features(struct cgx *cgx)
{
	if (is_dev_rpm(cgx))
		cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
	else
		cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
}

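/* MAC operations used for OcteonTx2 CGX devices; CN10K RPM devices get their
 * ops from rpm_get_mac_ops() in cgx_probe().
 */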
static struct mac_ops	cgx_mac_ops    = {
	.name		=       "cgx",
	.csr_offset	=       0,
	.lmac_offset    =       18,
	.int_register	=       CGXX_CMRX_INT,
	.int_set_reg	=       CGXX_CMRX_INT_ENA_W1S,
	.irq_offset	=       9,
	.int_ena_bit    =       FW_CGX_INT,
	.lmac_fwi	=	CGX_LMAC_FWI,
	.non_contiguous_serdes_lane = false,
	.rx_stats_cnt   =       9,
	.tx_stats_cnt   =       18,
	.get_nr_lmacs	=	cgx_get_nr_lmacs,
	.get_lmac_type  =       cgx_get_lmac_type,
	.mac_lmac_intl_lbk =    cgx_lmac_internal_loopback,
	.mac_get_rx_stats  =	cgx_get_rx_stats,
	.mac_get_tx_stats  =	cgx_get_tx_stats,
	.mac_enadis_rx_pause_fwding =	cgx_lmac_enadis_rx_pause_fwding,
	.mac_get_pause_frm_status =	cgx_lmac_get_pause_frm_status,
	.mac_enadis_pause_frm =		cgx_lmac_enadis_pause_frm,
	.mac_pause_frm_config =		cgx_lmac_pause_frm_config,
};

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	/* Use mac_ops to get MAC specific features */
	if (pdev->device == PCI_DEVID_CN10K_RPM)
		cgx->mac_ops = rpm_get_mac_ops();
	else
		cgx->mac_ops = &cgx_mac_ops;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = pci_msix_vec_count(cgx->pdev);
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	/* init wq for processing linkup requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_link_usertable_init();

	cgx_populate_features(cgx);

	mutex_init(&cgx->lock);

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	if (cgx) {
		cgx_lmac_exit(cgx);
		list_del(&cgx->cgx_list);
	}
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};