xref: /openbmc/linux/drivers/net/ethernet/cavium/thunder/thunder_bgx.c (revision e5f586c763a079349398e2b0c7c271386193ac34)
1 /*
2  * Copyright (C) 2015 Cavium, Inc.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of version 2 of the GNU General Public License
6  * as published by the Free Software Foundation.
7  */
8 
9 #include <linux/acpi.h>
10 #include <linux/module.h>
11 #include <linux/interrupt.h>
12 #include <linux/pci.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/phy.h>
16 #include <linux/of.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_net.h>
19 
20 #include "nic_reg.h"
21 #include "nic.h"
22 #include "thunder_bgx.h"
23 
24 #define DRV_NAME	"thunder-BGX"
25 #define DRV_VERSION	"1.0"
26 
27 struct lmac {
28 	struct bgx		*bgx;
29 	int			dmac;
30 	u8			mac[ETH_ALEN];
31 	u8                      lmac_type;
32 	u8                      lane_to_sds;
33 	bool                    use_training;
34 	bool                    autoneg;
35 	bool			link_up;
36 	int			lmacid; /* ID within BGX */
37 	int			lmacid_bd; /* ID on board */
38 	struct net_device       netdev;
39 	struct phy_device       *phydev;
40 	unsigned int            last_duplex;
41 	unsigned int            last_link;
42 	unsigned int            last_speed;
43 	bool			is_sgmii;
44 	struct delayed_work	dwork;
45 	struct workqueue_struct *check_link;
46 };
47 
48 struct bgx {
49 	u8			bgx_id;
50 	struct	lmac		lmac[MAX_LMAC_PER_BGX];
51 	u8			lmac_count;
52 	u8			max_lmac;
53 	u8                      acpi_lmac_idx;
54 	void __iomem		*reg_base;
55 	struct pci_dev		*pdev;
56 	bool                    is_dlm;
57 	bool                    is_rgx;
58 };
59 
60 static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
61 static int lmac_count; /* Total number of LMACs in the system */
62 
63 static int bgx_xaui_check_link(struct lmac *lmac);
64 
65 /* Supported devices */
66 static const struct pci_device_id bgx_id_table[] = {
67 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
68 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
69 	{ 0, }  /* end of table */
70 };
71 
72 MODULE_AUTHOR("Cavium Inc");
73 MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
74 MODULE_LICENSE("GPL v2");
75 MODULE_VERSION(DRV_VERSION);
76 MODULE_DEVICE_TABLE(pci, bgx_id_table);
77 
78 /* The Cavium ThunderX network controller can *only* be found in SoCs
79  * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
80  * registers on this platform are implicitly strongly ordered with respect
81  * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
82  * with no memory barriers in this driver.  The readq()/writeq() functions add
83  * explicit ordering operation which in this case are redundant, and only
84  * add overhead.
85  */
86 
87 /* Register read/write APIs */
88 static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
89 {
90 	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
91 
92 	return readq_relaxed(addr);
93 }
94 
95 static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
96 {
97 	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
98 
99 	writeq_relaxed(val, addr);
100 }
101 
102 static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
103 {
104 	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
105 
106 	writeq_relaxed(val | readq_relaxed(addr), addr);
107 }
108 
109 static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
110 {
111 	int timeout = 100;
112 	u64 reg_val;
113 
114 	while (timeout) {
115 		reg_val = bgx_reg_read(bgx, lmac, reg);
116 		if (zero && !(reg_val & mask))
117 			return 0;
118 		if (!zero && (reg_val & mask))
119 			return 0;
120 		usleep_range(1000, 2000);
121 		timeout--;
122 	}
123 	return 1;
124 }
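
/* A sketch of how the helpers above are typically combined later in this
 * file (illustrative only, not additional driver logic): bgx_reg_modify()
 * is a read-OR-write and so can only set bits, while bgx_poll_reg() waits
 * up to roughly 100-200ms (100 iterations of a 1-2ms sleep) for the masked
 * bits to clear (zero == true) or become set (zero == false). A
 * self-clearing reset sequence therefore looks like:
 *
 *	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
 *	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true))
 *		return -1;
 *
 * where a nonzero return from bgx_poll_reg() means the bit never cleared.
 */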
125 
126 static int max_bgx_per_node;
127 static void set_max_bgx_per_node(struct pci_dev *pdev)
128 {
129 	u16 sdevid;
130 
131 	if (max_bgx_per_node)
132 		return;
133 
134 	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
135 	switch (sdevid) {
136 	case PCI_SUBSYS_DEVID_81XX_BGX:
137 		max_bgx_per_node = MAX_BGX_PER_CN81XX;
138 		break;
139 	case PCI_SUBSYS_DEVID_83XX_BGX:
140 		max_bgx_per_node = MAX_BGX_PER_CN83XX;
141 		break;
142 	case PCI_SUBSYS_DEVID_88XX_BGX:
143 	default:
144 		max_bgx_per_node = MAX_BGX_PER_CN88XX;
145 		break;
146 	}
147 }
148 
149 static struct bgx *get_bgx(int node, int bgx_idx)
150 {
151 	int idx = (node * max_bgx_per_node) + bgx_idx;
152 
153 	return bgx_vnic[idx];
154 }
155 
156 /* Return a bitmap of BGX instances present in HW */
157 unsigned bgx_get_map(int node)
158 {
159 	int i;
160 	unsigned map = 0;
161 
162 	for (i = 0; i < max_bgx_per_node; i++) {
163 		if (bgx_vnic[(node * max_bgx_per_node) + i])
164 			map |= (1 << i);
165 	}
166 
167 	return map;
168 }
169 EXPORT_SYMBOL(bgx_get_map);
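
/* Example usage (an illustrative sketch, not part of this driver): a
 * caller such as the PF driver can walk the returned bitmap to find which
 * BGX blocks exist on a node. The loop bound and setup_bgx() helper below
 * are hypothetical and belong to the caller:
 *
 *	unsigned bgx_map = bgx_get_map(node);
 *	int bgx_idx;
 *
 *	for (bgx_idx = 0; bgx_idx < MAX_BGX_PER_NODE; bgx_idx++)
 *		if (bgx_map & BIT(bgx_idx))
 *			setup_bgx(node, bgx_idx);
 */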
170 
171 /* Return number of LMAC configured for this BGX */
172 int bgx_get_lmac_count(int node, int bgx_idx)
173 {
174 	struct bgx *bgx;
175 
176 	bgx = get_bgx(node, bgx_idx);
177 	if (bgx)
178 		return bgx->lmac_count;
179 
180 	return 0;
181 }
182 EXPORT_SYMBOL(bgx_get_lmac_count);
183 
184 /* Returns the current link status of LMAC */
185 void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
186 {
187 	struct bgx_link_status *link = (struct bgx_link_status *)status;
188 	struct bgx *bgx;
189 	struct lmac *lmac;
190 
191 	bgx = get_bgx(node, bgx_idx);
192 	if (!bgx)
193 		return;
194 
195 	lmac = &bgx->lmac[lmacid];
196 	link->mac_type = lmac->lmac_type;
197 	link->link_up = lmac->link_up;
198 	link->duplex = lmac->last_duplex;
199 	link->speed = lmac->last_speed;
200 }
201 EXPORT_SYMBOL(bgx_get_lmac_link_state);
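
/* Example usage (illustrative): callers pass a struct bgx_link_status and
 * read back the snapshot filled in above:
 *
 *	struct bgx_link_status link;
 *
 *	bgx_get_lmac_link_state(node, bgx_idx, lmacid, &link);
 *	if (link.link_up)
 *		pr_info("lmac%d: %d Mbps, %s duplex\n", lmacid, link.speed,
 *			link.duplex ? "full" : "half");
 */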
202 
203 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
204 {
205 	struct bgx *bgx = get_bgx(node, bgx_idx);
206 
207 	if (bgx)
208 		return bgx->lmac[lmacid].mac;
209 
210 	return NULL;
211 }
212 EXPORT_SYMBOL(bgx_get_lmac_mac);
213 
214 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
215 {
216 	struct bgx *bgx = get_bgx(node, bgx_idx);
217 
218 	if (!bgx)
219 		return;
220 
221 	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
222 }
223 EXPORT_SYMBOL(bgx_set_lmac_mac);
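
/* Example usage (illustrative): the two MAC accessors above pair
 * naturally, e.g. to pick up the firmware-provided address and later
 * program a user-supplied one; netdev here is a hypothetical caller-side
 * net_device:
 *
 *	const u8 *hw_mac = bgx_get_lmac_mac(node, bgx_idx, lmacid);
 *
 *	if (hw_mac && is_valid_ether_addr(hw_mac))
 *		ether_addr_copy(netdev->dev_addr, hw_mac);
 *	...
 *	bgx_set_lmac_mac(node, bgx_idx, lmacid, netdev->dev_addr);
 */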
224 
225 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
226 {
227 	struct bgx *bgx = get_bgx(node, bgx_idx);
228 	struct lmac *lmac;
229 	u64 cfg;
230 
231 	if (!bgx)
232 		return;
233 	lmac = &bgx->lmac[lmacid];
234 
235 	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
236 	if (enable)
237 		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
238 	else
239 		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
240 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
241 
242 	if (bgx->is_rgx)
243 		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
244 }
245 EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
246 
247 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
248 {
249 	struct pfc *pfc = (struct pfc *)pause;
250 	struct bgx *bgx = get_bgx(node, bgx_idx);
251 	struct lmac *lmac;
252 	u64 cfg;
253 
254 	if (!bgx)
255 		return;
256 	lmac = &bgx->lmac[lmacid];
257 	if (lmac->is_sgmii)
258 		return;
259 
260 	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
261 	pfc->fc_rx = cfg & RX_EN;
262 	pfc->fc_tx = cfg & TX_EN;
263 	pfc->autoneg = 0;
264 }
265 EXPORT_SYMBOL(bgx_lmac_get_pfc);
266 
267 void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
268 {
269 	struct pfc *pfc = (struct pfc *)pause;
270 	struct bgx *bgx = get_bgx(node, bgx_idx);
271 	struct lmac *lmac;
272 	u64 cfg;
273 
274 	if (!bgx)
275 		return;
276 	lmac = &bgx->lmac[lmacid];
277 	if (lmac->is_sgmii)
278 		return;
279 
280 	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
281 	cfg &= ~(RX_EN | TX_EN);
282 	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
283 	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
284 	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
285 }
286 EXPORT_SYMBOL(bgx_lmac_set_pfc);
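
/* Example usage (illustrative): both PFC helpers take a struct pfc, so an
 * ethtool-style pause handler might look like the sketch below. Note that
 * both helpers are no-ops for SGMII LMACs:
 *
 *	struct pfc cfg = {};
 *
 *	bgx_lmac_get_pfc(node, bgx_idx, lmacid, &cfg);
 *	cfg.fc_rx = 1;
 *	cfg.fc_tx = 1;
 *	bgx_lmac_set_pfc(node, bgx_idx, lmacid, &cfg);
 */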
287 
288 static void bgx_sgmii_change_link_state(struct lmac *lmac)
289 {
290 	struct bgx *bgx = lmac->bgx;
291 	u64 cmr_cfg;
292 	u64 port_cfg = 0;
293 	u64 misc_ctl = 0;
294 
295 	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
296 	cmr_cfg &= ~CMR_EN;
297 	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
298 
299 	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
300 	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
301 
302 	if (lmac->link_up) {
303 		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
304 		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
305 		port_cfg |=  (lmac->last_duplex << 2);
306 	} else {
307 		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
308 	}
309 
310 	switch (lmac->last_speed) {
311 	case 10:
312 		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
313 		port_cfg |= GMI_PORT_CFG_SPEED_MSB;  /* speed_msb 1 */
314 		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
315 		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
316 		misc_ctl |= 50; /* samp_pt */
317 		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
318 		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
319 		break;
320 	case 100:
321 		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
322 		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
323 		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
324 		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
325 		misc_ctl |= 5; /* samp_pt */
326 		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
327 		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
328 		break;
329 	case 1000:
330 		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
331 		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
332 		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
333 		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
334 		misc_ctl |= 1; /* samp_pt */
335 		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
336 		if (lmac->last_duplex)
337 			bgx_reg_write(bgx, lmac->lmacid,
338 				      BGX_GMP_GMI_TXX_BURST, 0);
339 		else
340 			bgx_reg_write(bgx, lmac->lmacid,
341 				      BGX_GMP_GMI_TXX_BURST, 8192);
342 		break;
343 	default:
344 		break;
345 	}
346 	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
347 	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
348 
349 	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
350 
351 	/* Re-enable lmac */
352 	cmr_cfg |= CMR_EN;
353 	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
354 
355 	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
356 		xcv_setup_link(lmac->link_up, lmac->last_speed);
357 }
358 
359 static void bgx_lmac_handler(struct net_device *netdev)
360 {
361 	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
362 	struct phy_device *phydev;
363 	int link_changed = 0;
364 
365 	if (!lmac)
366 		return;
367 
368 	phydev = lmac->phydev;
369 
370 	if (!phydev->link && lmac->last_link)
371 		link_changed = -1;
372 
373 	if (phydev->link &&
374 	    (lmac->last_duplex != phydev->duplex ||
375 	     lmac->last_link != phydev->link ||
376 	     lmac->last_speed != phydev->speed)) {
377 		link_changed = 1;
378 	}
379 
380 	lmac->last_link = phydev->link;
381 	lmac->last_speed = phydev->speed;
382 	lmac->last_duplex = phydev->duplex;
383 
384 	if (!link_changed)
385 		return;
386 
387 	if (link_changed > 0)
388 		lmac->link_up = true;
389 	else
390 		lmac->link_up = false;
391 
392 	if (lmac->is_sgmii)
393 		bgx_sgmii_change_link_state(lmac);
394 	else
395 		bgx_xaui_check_link(lmac);
396 }
397 
398 u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
399 {
400 	struct bgx *bgx;
401 
402 	bgx = get_bgx(node, bgx_idx);
403 	if (!bgx)
404 		return 0;
405 
406 	if (idx > 8)
407 		lmac = 0;
408 	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
409 }
410 EXPORT_SYMBOL(bgx_get_rx_stats);
411 
412 u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
413 {
414 	struct bgx *bgx;
415 
416 	bgx = get_bgx(node, bgx_idx);
417 	if (!bgx)
418 		return 0;
419 
420 	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
421 }
422 EXPORT_SYMBOL(bgx_get_tx_stats);
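
/* Example usage (illustrative): each statistics index selects one 64-bit
 * CMR counter register, 8 bytes apart; what a given index counts is
 * defined by the BGX_CMRX_RX/TX_STAT register layout in thunder_bgx.h:
 *
 *	u64 rx_stat0 = bgx_get_rx_stats(node, bgx_idx, lmac, 0);
 *	u64 tx_stat0 = bgx_get_tx_stats(node, bgx_idx, lmac, 0);
 */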
423 
424 static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
425 {
426 	u64 offset;
427 
428 	while (bgx->lmac[lmac].dmac > 0) {
429 		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
430 			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
431 		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
432 		bgx->lmac[lmac].dmac--;
433 	}
434 }
435 
436 /* Configure BGX LMAC in internal loopback mode */
437 void bgx_lmac_internal_loopback(int node, int bgx_idx,
438 				int lmac_idx, bool enable)
439 {
440 	struct bgx *bgx;
441 	struct lmac *lmac;
442 	u64    cfg;
443 
444 	bgx = get_bgx(node, bgx_idx);
445 	if (!bgx)
446 		return;
447 
448 	lmac = &bgx->lmac[lmac_idx];
449 	if (lmac->is_sgmii) {
450 		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
451 		if (enable)
452 			cfg |= PCS_MRX_CTL_LOOPBACK1;
453 		else
454 			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
455 		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
456 	} else {
457 		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
458 		if (enable)
459 			cfg |= SPU_CTL_LOOPBACK;
460 		else
461 			cfg &= ~SPU_CTL_LOOPBACK;
462 		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
463 	}
464 }
465 EXPORT_SYMBOL(bgx_lmac_internal_loopback);
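
/* Example usage (illustrative sketch): a self-test path can wrap a
 * transmit/receive check in internal loopback; send_and_verify() is a
 * hypothetical caller-side helper:
 *
 *	bgx_lmac_internal_loopback(node, bgx_idx, lmac_idx, true);
 *	err = send_and_verify(nic);
 *	bgx_lmac_internal_loopback(node, bgx_idx, lmac_idx, false);
 */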
466 
467 static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
468 {
469 	int lmacid = lmac->lmacid;
470 	u64 cfg;
471 
472 	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
473 	/* max packet size */
474 	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
475 
476 	/* Disable frame alignment if using preamble */
477 	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
478 	if (cfg & 1)
479 		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
480 
481 	/* Enable lmac */
482 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
483 
484 	/* PCS reset */
485 	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
486 	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
487 			 PCS_MRX_CTL_RESET, true)) {
488 		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
489 		return -1;
490 	}
491 
492 	/* Clear power down, reset autoneg, enable autoneg (if a PHY is attached) */
493 	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
494 	cfg &= ~PCS_MRX_CTL_PWR_DN;
495 	cfg |= PCS_MRX_CTL_RST_AN;
496 	if (lmac->phydev) {
497 		cfg |= PCS_MRX_CTL_AN_EN;
498 	} else {
499 		/* When there is no PHY driver, or the PHY is non-standard,
500 		 * firmware sets AN_EN to tell the Linux driver whether to
501 		 * do autonegotiation and link polling itself.
502 		 */
503 		if (cfg & PCS_MRX_CTL_AN_EN)
504 			lmac->autoneg = true;
505 	}
506 	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
507 
508 	if (lmac->lmac_type == BGX_MODE_QSGMII) {
509 		/* Disable disparity check for QSGMII */
510 		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
511 		cfg &= ~PCS_MISC_CTL_DISP_EN;
512 		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
513 		return 0;
514 	}
515 
516 	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
517 		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
518 				 PCS_MRX_STATUS_AN_CPT, false)) {
519 			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
520 			return -1;
521 		}
522 	}
523 
524 	return 0;
525 }
526 
527 static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
528 {
529 	u64 cfg;
530 	int lmacid = lmac->lmacid;
531 
532 	/* Reset SPU */
533 	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
534 	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
535 		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
536 		return -1;
537 	}
538 
539 	/* Disable LMAC */
540 	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
541 	cfg &= ~CMR_EN;
542 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
543 
544 	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
545 	/* Set interleaved running disparity for RXAUI */
546 	if (lmac->lmac_type == BGX_MODE_RXAUI)
547 		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
548 			       SPU_MISC_CTL_INTLV_RDISP);
549 
550 	/* Clear receive packet disable */
551 	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
552 	cfg &= ~SPU_MISC_CTL_RX_DIS;
553 	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
554 
555 	/* clear all interrupts */
556 	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
557 	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
558 	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
559 	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
560 	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
561 	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
562 
563 	if (lmac->use_training) {
564 		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
565 		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
566 		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
567 		/* training enable */
568 		bgx_reg_modify(bgx, lmacid,
569 			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
570 	}
571 
572 	/* Append FCS to each packet */
573 	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
574 
575 	/* Disable forward error correction */
576 	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
577 	cfg &= ~SPU_FEC_CTL_FEC_EN;
578 	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
579 
580 	/* Disable autoneg */
581 	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
582 	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
583 	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
584 
585 	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
586 	if (lmac->lmac_type == BGX_MODE_10G_KR)
587 		cfg |= (1 << 23);
588 	else if (lmac->lmac_type == BGX_MODE_40G_KR)
589 		cfg |= (1 << 24);
590 	else
591 		cfg &= ~((1 << 23) | (1 << 24));
592 	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
593 	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
594 
595 	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
596 	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
597 	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
598 
599 	/* Enable lmac */
600 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
601 
602 	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
603 	cfg &= ~SPU_CTL_LOW_POWER;
604 	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
605 
606 	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
607 	cfg &= ~SMU_TX_CTL_UNI_EN;
608 	cfg |= SMU_TX_CTL_DIC_EN;
609 	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
610 
611 	/* Enable receive and transmission of pause frames */
612 	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
613 		      BCK_EN | DRP_EN | TX_EN | RX_EN));
614 	/* Configure pause time and interval */
615 	bgx_reg_write(bgx, lmacid,
616 		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
617 	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
618 	cfg &= ~0xFFFFull;
619 	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
620 		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
621 	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);
622 
623 	/* take lmac_count into account */
624 	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
625 	/* max packet size */
626 	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
627 
628 	return 0;
629 }
630 
631 static int bgx_xaui_check_link(struct lmac *lmac)
632 {
633 	struct bgx *bgx = lmac->bgx;
634 	int lmacid = lmac->lmacid;
635 	int lmac_type = lmac->lmac_type;
636 	u64 cfg;
637 
638 	if (lmac->use_training) {
639 		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
640 		if (!(cfg & (1ull << 13))) {
641 			cfg = (1ull << 13) | (1ull << 14);
642 			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
643 			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
644 			cfg |= (1ull << 0);
645 			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
646 			return -1;
647 		}
648 	}
649 
650 	/* wait for PCS to come out of reset */
651 	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
652 		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
653 		return -1;
654 	}
655 
656 	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
657 	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
658 		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
659 				 SPU_BR_STATUS_BLK_LOCK, false)) {
660 			dev_err(&bgx->pdev->dev,
661 				"SPU_BR_STATUS_BLK_LOCK not completed\n");
662 			return -1;
663 		}
664 	} else {
665 		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
666 				 SPU_BX_STATUS_RX_ALIGN, false)) {
667 			dev_err(&bgx->pdev->dev,
668 				"SPU_BX_STATUS_RX_ALIGN not completed\n");
669 			return -1;
670 		}
671 	}
672 
673 	/* Clear rcvflt bit (latching high) and read it back */
674 	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
675 		bgx_reg_modify(bgx, lmacid,
676 			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
677 	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
678 		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
679 		if (lmac->use_training) {
680 			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
681 			if (!(cfg & (1ull << 13))) {
682 				cfg = (1ull << 13) | (1ull << 14);
683 				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
684 				cfg = bgx_reg_read(bgx, lmacid,
685 						   BGX_SPUX_BR_PMD_CRTL);
686 				cfg |= (1ull << 0);
687 				bgx_reg_write(bgx, lmacid,
688 					      BGX_SPUX_BR_PMD_CRTL, cfg);
689 				return -1;
690 			}
691 		}
692 		return -1;
693 	}
694 
695 	/* Wait for BGX RX to be idle */
696 	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
697 		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
698 		return -1;
699 	}
700 
701 	/* Wait for BGX TX to be idle */
702 	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
703 		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
704 		return -1;
705 	}
706 
707 	/* Check for MAC RX faults */
708 	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
709 	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
710 	cfg &= SMU_RX_CTL_STATUS;
711 	if (!cfg)
712 		return 0;
713 
714 	/* Rx local/remote fault seen.
715 	 * Do lmac reinit to see if condition recovers
716 	 */
717 	bgx_lmac_xaui_init(bgx, lmac);
718 
719 	return -1;
720 }
721 
722 static void bgx_poll_for_sgmii_link(struct lmac *lmac)
723 {
724 	u64 pcs_link, an_result;
725 	u8 speed;
726 
727 	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
728 				BGX_GMP_PCS_MRX_STATUS);
729 
730 	/* Link state bit is sticky, read it again */
731 	if (!(pcs_link & PCS_MRX_STATUS_LINK))
732 		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
733 					BGX_GMP_PCS_MRX_STATUS);
734 
735 	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
736 			 PCS_MRX_STATUS_AN_CPT, false)) {
737 		lmac->link_up = false;
738 		lmac->last_speed = SPEED_UNKNOWN;
739 		lmac->last_duplex = DUPLEX_UNKNOWN;
740 		goto next_poll;
741 	}
742 
743 	lmac->link_up = !!(pcs_link & PCS_MRX_STATUS_LINK);
744 	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
745 				 BGX_GMP_PCS_ANX_AN_RESULTS);
746 
747 	speed = (an_result >> 3) & 0x3;
748 	lmac->last_duplex = (an_result >> 1) & 0x1;
749 	switch (speed) {
750 	case 0:
751 		lmac->last_speed = 10;
752 		break;
753 	case 1:
754 		lmac->last_speed = 100;
755 		break;
756 	case 2:
757 		lmac->last_speed = 1000;
758 		break;
759 	default:
760 		lmac->link_up = false;
761 		lmac->last_speed = SPEED_UNKNOWN;
762 		lmac->last_duplex = DUPLEX_UNKNOWN;
763 		break;
764 	}
765 
766 next_poll:
767 
768 	if (lmac->last_link != lmac->link_up) {
769 		if (lmac->link_up)
770 			bgx_sgmii_change_link_state(lmac);
771 		lmac->last_link = lmac->link_up;
772 	}
773 
774 	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
775 }
776 
777 static void bgx_poll_for_link(struct work_struct *work)
778 {
779 	struct lmac *lmac;
780 	u64 spu_link, smu_link;
781 
782 	lmac = container_of(work, struct lmac, dwork.work);
783 	if (lmac->is_sgmii) {
784 		bgx_poll_for_sgmii_link(lmac);
785 		return;
786 	}
787 
788 	/* Receive link is latching low. Force it high and verify it */
789 	bgx_reg_modify(lmac->bgx, lmac->lmacid,
790 		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
791 	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
792 		     SPU_STATUS1_RCV_LNK, false);
793 
794 	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
795 	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
796 
797 	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
798 	    !(smu_link & SMU_RX_CTL_STATUS)) {
799 		lmac->link_up = 1;
800 		if (lmac->lmac_type == BGX_MODE_XLAUI)
801 			lmac->last_speed = 40000;
802 		else
803 			lmac->last_speed = 10000;
804 		lmac->last_duplex = 1;
805 	} else {
806 		lmac->link_up = 0;
807 		lmac->last_speed = SPEED_UNKNOWN;
808 		lmac->last_duplex = DUPLEX_UNKNOWN;
809 	}
810 
811 	if (lmac->last_link != lmac->link_up) {
812 		if (lmac->link_up) {
813 			if (bgx_xaui_check_link(lmac)) {
814 				/* Errors, clear link_up state */
815 				lmac->link_up = 0;
816 				lmac->last_speed = SPEED_UNKNOWN;
817 				lmac->last_duplex = DUPLEX_UNKNOWN;
818 			}
819 		}
820 		lmac->last_link = lmac->link_up;
821 	}
822 
823 	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
824 }
825 
826 static int phy_interface_mode(u8 lmac_type)
827 {
828 	if (lmac_type == BGX_MODE_QSGMII)
829 		return PHY_INTERFACE_MODE_QSGMII;
830 	if (lmac_type == BGX_MODE_RGMII)
831 		return PHY_INTERFACE_MODE_RGMII;
832 
833 	return PHY_INTERFACE_MODE_SGMII;
834 }
835 
836 static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
837 {
838 	struct lmac *lmac;
839 	u64 cfg;
840 
841 	lmac = &bgx->lmac[lmacid];
842 	lmac->bgx = bgx;
843 
844 	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
845 	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
846 	    (lmac->lmac_type == BGX_MODE_RGMII)) {
847 		lmac->is_sgmii = 1;
848 		if (bgx_lmac_sgmii_init(bgx, lmac))
849 			return -1;
850 	} else {
851 		lmac->is_sgmii = 0;
852 		if (bgx_lmac_xaui_init(bgx, lmac))
853 			return -1;
854 	}
855 
856 	if (lmac->is_sgmii) {
857 		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
858 		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
859 		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
860 		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
861 	} else {
862 		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
863 		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
864 		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
865 		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
866 	}
867 
868 	/* Enable lmac */
869 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
870 
871 	/* Restore default cfg, in case low level firmware changed it */
872 	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
873 
874 	if ((lmac->lmac_type != BGX_MODE_XFI) &&
875 	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
876 	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
877 	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
878 		if (!lmac->phydev) {
879 			if (lmac->autoneg) {
880 				bgx_reg_write(bgx, lmacid,
881 					      BGX_GMP_PCS_LINKX_TIMER,
882 					      PCS_LINKX_TIMER_COUNT);
883 				goto poll;
884 			} else {
885 				/* Default to the link speed and duplex set below */
886 				lmac->link_up = true;
887 				lmac->last_speed = 1000;
888 				lmac->last_duplex = 1;
889 				bgx_sgmii_change_link_state(lmac);
890 				return 0;
891 			}
892 		}
893 		lmac->phydev->dev_flags = 0;
894 
895 		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
896 				       bgx_lmac_handler,
897 				       phy_interface_mode(lmac->lmac_type)))
898 			return -ENODEV;
899 
900 		phy_start_aneg(lmac->phydev);
901 		return 0;
902 	}
903 
904 poll:
905 	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
906 					   WQ_MEM_RECLAIM, 1);
907 	if (!lmac->check_link)
908 		return -ENOMEM;
909 	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
910 	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
911 
912 	return 0;
913 }
914 
915 static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
916 {
917 	struct lmac *lmac;
918 	u64 cfg;
919 
920 	lmac = &bgx->lmac[lmacid];
921 	if (lmac->check_link) {
922 		/* Destroy work queue */
923 		cancel_delayed_work_sync(&lmac->dwork);
924 		destroy_workqueue(lmac->check_link);
925 	}
926 
927 	/* Disable packet reception */
928 	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
929 	cfg &= ~CMR_PKT_RX_EN;
930 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
931 
932 	/* Give the Rx/Tx FIFOs a chance to drain */
933 	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
934 	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
935 
936 	/* Disable packet transmission */
937 	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
938 	cfg &= ~CMR_PKT_TX_EN;
939 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
940 
941 	/* Disable serdes lanes */
942 	if (!lmac->is_sgmii)
943 		bgx_reg_modify(bgx, lmacid,
944 			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
945 	else
946 		bgx_reg_modify(bgx, lmacid,
947 			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
948 
949 	/* Disable LMAC */
950 	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
951 	cfg &= ~CMR_EN;
952 	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
953 
954 	bgx_flush_dmac_addrs(bgx, lmacid);
955 
956 	if ((lmac->lmac_type != BGX_MODE_XFI) &&
957 	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
958 	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
959 	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
960 		phy_disconnect(lmac->phydev);
961 
962 	lmac->phydev = NULL;
963 }
964 
965 static void bgx_init_hw(struct bgx *bgx)
966 {
967 	int i;
968 	struct lmac *lmac;
969 
970 	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
971 	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
972 		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
973 
974 	/* Set lmac type and lane2serdes mapping */
975 	for (i = 0; i < bgx->lmac_count; i++) {
976 		lmac = &bgx->lmac[i];
977 		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
978 			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
979 		bgx->lmac[i].lmacid_bd = lmac_count;
980 		lmac_count++;
981 	}
982 
983 	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
984 	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
985 
986 	/* Set the backpressure AND mask */
987 	for (i = 0; i < bgx->lmac_count; i++)
988 		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
989 			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
990 			       (i * MAX_BGX_CHANS_PER_LMAC));
991 
992 	/* Disable all MAC filtering */
993 	for (i = 0; i < RX_DMAC_COUNT; i++)
994 		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
995 
996 	/* Disable MAC steering (NCSI traffic) */
997 	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
998 		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
999 }
1000 
1001 static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
1002 {
1003 	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
1004 }
1005 
1006 static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
1007 {
1008 	struct device *dev = &bgx->pdev->dev;
1009 	struct lmac *lmac;
1010 	char str[20];
1011 
1012 	if (!bgx->is_dlm && lmacid)
1013 		return;
1014 
1015 	lmac = &bgx->lmac[lmacid];
1016 	if (!bgx->is_dlm)
1017 		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
1018 	else
1019 		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
1020 
1021 	switch (lmac->lmac_type) {
1022 	case BGX_MODE_SGMII:
1023 		dev_info(dev, "%s: SGMII\n", (char *)str);
1024 		break;
1025 	case BGX_MODE_XAUI:
1026 		dev_info(dev, "%s: XAUI\n", (char *)str);
1027 		break;
1028 	case BGX_MODE_RXAUI:
1029 		dev_info(dev, "%s: RXAUI\n", (char *)str);
1030 		break;
1031 	case BGX_MODE_XFI:
1032 		if (!lmac->use_training)
1033 			dev_info(dev, "%s: XFI\n", (char *)str);
1034 		else
1035 			dev_info(dev, "%s: 10G_KR\n", (char *)str);
1036 		break;
1037 	case BGX_MODE_XLAUI:
1038 		if (!lmac->use_training)
1039 			dev_info(dev, "%s: XLAUI\n", (char *)str);
1040 		else
1041 			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
1042 		break;
1043 	case BGX_MODE_QSGMII:
1044 		dev_info(dev, "%s: QSGMII\n", (char *)str);
1045 		break;
1046 	case BGX_MODE_RGMII:
1047 		dev_info(dev, "%s: RGMII\n", (char *)str);
1048 		break;
1049 	case BGX_MODE_INVALID:
1050 		/* Nothing to do */
1051 		break;
1052 	}
1053 }
1054 
1055 static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
1056 {
1057 	switch (lmac->lmac_type) {
1058 	case BGX_MODE_SGMII:
1059 	case BGX_MODE_XFI:
1060 		lmac->lane_to_sds = lmac->lmacid;
1061 		break;
1062 	case BGX_MODE_XAUI:
1063 	case BGX_MODE_XLAUI:
1064 	case BGX_MODE_RGMII:
1065 		lmac->lane_to_sds = 0xE4;
1066 		break;
1067 	case BGX_MODE_RXAUI:
1068 		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
1069 		break;
1070 	case BGX_MODE_QSGMII:
1071 		/* There is no way to determine whether DLM0/2 or DLM1/3
1072 		 * is configured for QSGMII, since the bootloader configures
1073 		 * all LMACs; take whatever low level firmware has
1074 		 * configured.
1075 		 */
1076 		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
1077 		break;
1078 	default:
1079 		lmac->lane_to_sds = 0;
1080 		break;
1081 	}
1082 }
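
/* A note on the values used above (a sketch of the encoding as used by
 * this driver, not a datasheet quote): lane_to_sds packs one 2-bit SerDes
 * lane index per LMAC lane, least significant pair first. 0xE4 (binary
 * 11 10 01 00) is therefore the identity map lane0->SDS0 ... lane3->SDS3,
 * while the RXAUI values 0x4 (01 00 -> SDS0/1) and 0xE (11 10 -> SDS2/3)
 * select the lower or upper lane pair.
 */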
1083 
1084 static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
1085 {
1086 	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
1087 	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
1088 		lmac->use_training = 0;
1089 		return;
1090 	}
1091 
1092 	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
1093 							SPU_PMD_CRTL_TRAIN_EN;
1094 }
1095 
1096 static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
1097 {
1098 	struct lmac *lmac;
1099 	u64 cmr_cfg;
1100 	u8 lmac_type;
1101 	u8 lane_to_sds;
1102 
1103 	lmac = &bgx->lmac[idx];
1104 
1105 	if (!bgx->is_dlm || bgx->is_rgx) {
1106 		/* Read LMAC0 type to figure out QLM mode
1107 		 * This is configured by low level firmware
1108 		 */
1109 		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
1110 		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
1111 		if (bgx->is_rgx)
1112 			lmac->lmac_type = BGX_MODE_RGMII;
1113 		lmac_set_training(bgx, lmac, 0);
1114 		lmac_set_lane2sds(bgx, lmac);
1115 		return;
1116 	}
1117 
1118 	/* For DLMs or SLMs on 80/81/83xx many lane configurations are
1119 	 * possible and they vary across boards. The kernel has no way to
1120 	 * identify the board type, but low level firmware does, so just
1121 	 * take the lmac type and serdes lane config as is.
1122 	 */
1123 	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
1124 	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
1125 	lane_to_sds = (u8)(cmr_cfg & 0xFF);
1126 	/* Check if config is reset value */
1127 	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
1128 		lmac->lmac_type = BGX_MODE_INVALID;
1129 	else
1130 		lmac->lmac_type = lmac_type;
1131 	lmac->lane_to_sds = lane_to_sds;
1132 	lmac_set_training(bgx, lmac, lmac->lmacid);
1133 }
1134 
1135 static void bgx_get_qlm_mode(struct bgx *bgx)
1136 {
1137 	struct lmac *lmac;
1138 	u8  idx;
1139 
1140 	/* Init all LMACs' type to invalid */
1141 	for (idx = 0; idx < bgx->max_lmac; idx++) {
1142 		lmac = &bgx->lmac[idx];
1143 		lmac->lmacid = idx;
1144 		lmac->lmac_type = BGX_MODE_INVALID;
1145 		lmac->use_training = false;
1146 	}
1147 
1148 	/* It is assumed that low level firmware sets this value */
1149 	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
1150 	if (bgx->lmac_count > bgx->max_lmac)
1151 		bgx->lmac_count = bgx->max_lmac;
1152 
1153 	for (idx = 0; idx < bgx->lmac_count; idx++) {
1154 		bgx_set_lmac_config(bgx, idx);
1155 		bgx_print_qlm_mode(bgx, idx);
1156 	}
1157 }
1158 
1159 #ifdef CONFIG_ACPI
1160 
1161 static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
1162 				u8 *dst)
1163 {
1164 	u8 mac[ETH_ALEN];
1165 	int ret;
1166 
1167 	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
1168 					    "mac-address", mac, ETH_ALEN);
1169 	if (ret)
1170 		goto out;
1171 
1172 	if (!is_valid_ether_addr(mac)) {
1173 		dev_err(dev, "MAC address invalid: %pM\n", mac);
1174 		ret = -EINVAL;
1175 		goto out;
1176 	}
1177 
1178 	dev_info(dev, "MAC address set to: %pM\n", mac);
1179 
1180 	memcpy(dst, mac, ETH_ALEN);
1181 out:
1182 	return ret;
1183 }
1184 
1185 /* Currently only sets the MAC address. */
1186 static acpi_status bgx_acpi_register_phy(acpi_handle handle,
1187 					 u32 lvl, void *context, void **rv)
1188 {
1189 	struct bgx *bgx = context;
1190 	struct device *dev = &bgx->pdev->dev;
1191 	struct acpi_device *adev;
1192 
1193 	if (acpi_bus_get_device(handle, &adev))
1194 		goto out;
1195 
1196 	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
1197 
1198 	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
1199 
1200 	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
1201 	bgx->acpi_lmac_idx++; /* move to next LMAC */
1202 out:
1203 	return AE_OK;
1204 }
1205 
1206 static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
1207 				     void *context, void **ret_val)
1208 {
1209 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
1210 	struct bgx *bgx = context;
1211 	char bgx_sel[5];
1212 
1213 	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
1214 	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
1215 		pr_warn("Invalid link device\n");
1216 		return AE_OK;
1217 	}
1218 
1219 	if (strncmp(string.pointer, bgx_sel, 4)) {
1220 		kfree(string.pointer);
1221 		return AE_OK;
1222 	}
1221 
1222 	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1223 			    bgx_acpi_register_phy, NULL, bgx, NULL);
1224 
1225 	kfree(string.pointer);
1226 	return AE_CTRL_TERMINATE;
1227 }
1228 
1229 static int bgx_init_acpi_phy(struct bgx *bgx)
1230 {
1231 	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
1232 	return 0;
1233 }
1234 
1235 #else
1236 
1237 static int bgx_init_acpi_phy(struct bgx *bgx)
1238 {
1239 	return -ENODEV;
1240 }
1241 
1242 #endif /* CONFIG_ACPI */
1243 
1244 #if IS_ENABLED(CONFIG_OF_MDIO)
1245 
1246 static int bgx_init_of_phy(struct bgx *bgx)
1247 {
1248 	struct fwnode_handle *fwn;
1249 	struct device_node *node = NULL;
1250 	u8 lmac = 0;
1251 
1252 	device_for_each_child_node(&bgx->pdev->dev, fwn) {
1253 		struct phy_device *pd;
1254 		struct device_node *phy_np;
1255 		const char *mac;
1256 
1257 		/* Should always be an OF node.  But if it is not, we
1258 		 * cannot handle it, so exit the loop.
1259 		 */
1260 		node = to_of_node(fwn);
1261 		if (!node)
1262 			break;
1263 
1264 		mac = of_get_mac_address(node);
1265 		if (mac)
1266 			ether_addr_copy(bgx->lmac[lmac].mac, mac);
1267 
1268 		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
1269 		bgx->lmac[lmac].lmacid = lmac;
1270 
1271 		phy_np = of_parse_phandle(node, "phy-handle", 0);
1272 		/* If there is no PHY, or if defective firmware presents
1273 		 * this Cortina PHY (for which there is no driver
1274 		 * support), ignore it.
1275 		 */
1276 		if (phy_np &&
1277 		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
1278 			/* Wait until the phy drivers are available */
1279 			pd = of_phy_find_device(phy_np);
1280 			if (!pd)
1281 				goto defer;
1282 			bgx->lmac[lmac].phydev = pd;
1283 		}
1284 
1285 		lmac++;
1286 		if (lmac == bgx->max_lmac) {
1287 			of_node_put(node);
1288 			break;
1289 		}
1290 	}
1291 	return 0;
1292 
1293 defer:
1294 	/* We are bailing out, try not to leak device reference counts
1295 	 * for phy devices we may have already found.
1296 	 */
1297 	while (lmac) {
1298 		lmac--;
1299 		if (bgx->lmac[lmac].phydev) {
1300 			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
1301 			bgx->lmac[lmac].phydev = NULL;
1302 		}
1303 	}
1304 	of_node_put(node);
1305 	return -EPROBE_DEFER;
1306 }
1307 
1308 #else
1309 
1310 static int bgx_init_of_phy(struct bgx *bgx)
1311 {
1312 	return -ENODEV;
1313 }
1314 
1315 #endif /* CONFIG_OF_MDIO */
1316 
1317 static int bgx_init_phy(struct bgx *bgx)
1318 {
1319 	if (!acpi_disabled)
1320 		return bgx_init_acpi_phy(bgx);
1321 
1322 	return bgx_init_of_phy(bgx);
1323 }
1324 
1325 static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1326 {
1327 	int err;
1328 	struct device *dev = &pdev->dev;
1329 	struct bgx *bgx = NULL;
1330 	u8 lmac;
1331 	u16 sdevid;
1332 
1333 	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
1334 	if (!bgx)
1335 		return -ENOMEM;
1336 	bgx->pdev = pdev;
1337 
1338 	pci_set_drvdata(pdev, bgx);
1339 
1340 	err = pci_enable_device(pdev);
1341 	if (err) {
1342 		dev_err(dev, "Failed to enable PCI device\n");
1343 		pci_set_drvdata(pdev, NULL);
1344 		return err;
1345 	}
1346 
1347 	err = pci_request_regions(pdev, DRV_NAME);
1348 	if (err) {
1349 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1350 		goto err_disable_device;
1351 	}
1352 
1353 	/* MAP configuration registers */
1354 	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1355 	if (!bgx->reg_base) {
1356 		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
1357 		err = -ENOMEM;
1358 		goto err_release_regions;
1359 	}
1360 
1361 	set_max_bgx_per_node(pdev);
1362 
1363 	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
1364 	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
1365 		bgx->bgx_id = (pci_resource_start(pdev,
1366 			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
1367 		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
1368 		bgx->max_lmac = MAX_LMAC_PER_BGX;
1369 		bgx_vnic[bgx->bgx_id] = bgx;
1370 	} else {
1371 		bgx->is_rgx = true;
1372 		bgx->max_lmac = 1;
1373 		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
1374 		bgx_vnic[bgx->bgx_id] = bgx;
1375 		xcv_init_hw();
1376 	}
1377 
1378 	/* On 81xx all BGX interfaces are DLMs; on 83xx there are 3 BGX
1379 	 * QLMs and one BGX (BGX2) that can be split across 2 DLMs.
1380 	 */
1381 	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
1382 	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
1383 	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
1384 		bgx->is_dlm = true;
1385 
1386 	bgx_get_qlm_mode(bgx);
1387 
1388 	err = bgx_init_phy(bgx);
1389 	if (err)
1390 		goto err_enable;
1391 
1392 	bgx_init_hw(bgx);
1393 
1394 	/* Enable all LMACs */
1395 	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
1396 		err = bgx_lmac_enable(bgx, lmac);
1397 		if (err) {
1398 			dev_err(dev, "BGX%d failed to enable lmac%d\n",
1399 				bgx->bgx_id, lmac);
1400 			while (lmac)
1401 				bgx_lmac_disable(bgx, --lmac);
1402 			goto err_enable;
1403 		}
1404 	}
1405 
1406 	return 0;
1407 
1408 err_enable:
1409 	bgx_vnic[bgx->bgx_id] = NULL;
1410 err_release_regions:
1411 	pci_release_regions(pdev);
1412 err_disable_device:
1413 	pci_disable_device(pdev);
1414 	pci_set_drvdata(pdev, NULL);
1415 	return err;
1416 }
1417 
1418 static void bgx_remove(struct pci_dev *pdev)
1419 {
1420 	struct bgx *bgx = pci_get_drvdata(pdev);
1421 	u8 lmac;
1422 
1423 	/* Disable all LMACs */
1424 	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
1425 		bgx_lmac_disable(bgx, lmac);
1426 
1427 	bgx_vnic[bgx->bgx_id] = NULL;
1428 	pci_release_regions(pdev);
1429 	pci_disable_device(pdev);
1430 	pci_set_drvdata(pdev, NULL);
1431 }
1432 
1433 static struct pci_driver bgx_driver = {
1434 	.name = DRV_NAME,
1435 	.id_table = bgx_id_table,
1436 	.probe = bgx_probe,
1437 	.remove = bgx_remove,
1438 };
1439 
1440 static int __init bgx_init_module(void)
1441 {
1442 	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
1443 
1444 	return pci_register_driver(&bgx_driver);
1445 }
1446 
1447 static void __exit bgx_cleanup_module(void)
1448 {
1449 	pci_unregister_driver(&bgx_driver);
1450 }
1451 
1452 module_init(bgx_init_module);
1453 module_exit(bgx_cleanup_module);
1454