1 /* 2 * Copyright (C) 2015 Cavium, Inc. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of version 2 of the GNU General Public License 6 * as published by the Free Software Foundation. 7 */ 8 9 #include <linux/acpi.h> 10 #include <linux/module.h> 11 #include <linux/interrupt.h> 12 #include <linux/pci.h> 13 #include <linux/netdevice.h> 14 #include <linux/etherdevice.h> 15 #include <linux/phy.h> 16 #include <linux/of.h> 17 #include <linux/of_mdio.h> 18 #include <linux/of_net.h> 19 20 #include "nic_reg.h" 21 #include "nic.h" 22 #include "thunder_bgx.h" 23 24 #define DRV_NAME "thunder-BGX" 25 #define DRV_VERSION "1.0" 26 27 struct lmac { 28 struct bgx *bgx; 29 int dmac; 30 u8 mac[ETH_ALEN]; 31 u8 lmac_type; 32 u8 lane_to_sds; 33 bool use_training; 34 bool link_up; 35 int lmacid; /* ID within BGX */ 36 int lmacid_bd; /* ID on board */ 37 struct net_device netdev; 38 struct phy_device *phydev; 39 unsigned int last_duplex; 40 unsigned int last_link; 41 unsigned int last_speed; 42 bool is_sgmii; 43 struct delayed_work dwork; 44 struct workqueue_struct *check_link; 45 }; 46 47 struct bgx { 48 u8 bgx_id; 49 struct lmac lmac[MAX_LMAC_PER_BGX]; 50 int lmac_count; 51 u8 max_lmac; 52 void __iomem *reg_base; 53 struct pci_dev *pdev; 54 bool is_dlm; 55 bool is_rgx; 56 }; 57 58 static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; 59 static int lmac_count; /* Total no of LMACs in system */ 60 61 static int bgx_xaui_check_link(struct lmac *lmac); 62 63 /* Supported devices */ 64 static const struct pci_device_id bgx_id_table[] = { 65 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, 66 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) }, 67 { 0, } /* end of table */ 68 }; 69 70 MODULE_AUTHOR("Cavium Inc"); 71 MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver"); 72 MODULE_LICENSE("GPL v2"); 73 MODULE_VERSION(DRV_VERSION); 74 MODULE_DEVICE_TABLE(pci, bgx_id_table); 75 76 /* The Cavium ThunderX network controller can *only* be found in SoCs 77 * containing the ThunderX ARM64 CPU implementation. All accesses to the device 78 * registers on this platform are implicitly strongly ordered with respect 79 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use 80 * with no memory barriers in this driver. The readq()/writeq() functions add 81 * explicit ordering operation which in this case are redundant, and only 82 * add overhead. 83 */ 84 85 /* Register read/write APIs */ 86 static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset) 87 { 88 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; 89 90 return readq_relaxed(addr); 91 } 92 93 static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val) 94 { 95 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; 96 97 writeq_relaxed(val, addr); 98 } 99 100 static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val) 101 { 102 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; 103 104 writeq_relaxed(val | readq_relaxed(addr), addr); 105 } 106 107 static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero) 108 { 109 int timeout = 100; 110 u64 reg_val; 111 112 while (timeout) { 113 reg_val = bgx_reg_read(bgx, lmac, reg); 114 if (zero && !(reg_val & mask)) 115 return 0; 116 if (!zero && (reg_val & mask)) 117 return 0; 118 usleep_range(1000, 2000); 119 timeout--; 120 } 121 return 1; 122 } 123 124 /* Return number of BGX present in HW */ 125 unsigned bgx_get_map(int node) 126 { 127 int i; 128 unsigned map = 0; 129 130 for (i = 0; i < MAX_BGX_PER_NODE; i++) { 131 if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i]) 132 map |= (1 << i); 133 } 134 135 return map; 136 } 137 EXPORT_SYMBOL(bgx_get_map); 138 139 /* Return number of LMAC configured for this BGX */ 140 int bgx_get_lmac_count(int node, int bgx_idx) 141 { 142 struct bgx *bgx; 143 144 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 145 if (bgx) 146 return bgx->lmac_count; 147 148 return 0; 149 } 150 EXPORT_SYMBOL(bgx_get_lmac_count); 151 152 /* Returns the current link status of LMAC */ 153 void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) 154 { 155 struct bgx_link_status *link = (struct bgx_link_status *)status; 156 struct bgx *bgx; 157 struct lmac *lmac; 158 159 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 160 if (!bgx) 161 return; 162 163 lmac = &bgx->lmac[lmacid]; 164 link->mac_type = lmac->lmac_type; 165 link->link_up = lmac->link_up; 166 link->duplex = lmac->last_duplex; 167 link->speed = lmac->last_speed; 168 } 169 EXPORT_SYMBOL(bgx_get_lmac_link_state); 170 171 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) 172 { 173 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 174 175 if (bgx) 176 return bgx->lmac[lmacid].mac; 177 178 return NULL; 179 } 180 EXPORT_SYMBOL(bgx_get_lmac_mac); 181 182 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) 183 { 184 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 185 186 if (!bgx) 187 return; 188 189 ether_addr_copy(bgx->lmac[lmacid].mac, mac); 190 } 191 EXPORT_SYMBOL(bgx_set_lmac_mac); 192 193 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) 194 { 195 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 196 struct lmac *lmac; 197 u64 cfg; 198 199 if (!bgx) 200 return; 201 lmac = &bgx->lmac[lmacid]; 202 203 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 204 if (enable) 205 cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; 206 else 207 cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); 208 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); 209 210 if (bgx->is_rgx) 211 xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed); 212 } 213 EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); 214 215 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause) 216 { 217 struct pfc *pfc = (struct pfc *)pause; 218 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 219 struct lmac *lmac; 220 u64 cfg; 221 222 if (!bgx) 223 return; 224 lmac = &bgx->lmac[lmacid]; 225 if (lmac->is_sgmii) 226 return; 227 228 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL); 229 pfc->fc_rx = cfg & RX_EN; 230 pfc->fc_tx = cfg & TX_EN; 231 pfc->autoneg = 0; 232 } 233 EXPORT_SYMBOL(bgx_lmac_get_pfc); 234 235 void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause) 236 { 237 struct pfc *pfc = (struct pfc *)pause; 238 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 239 struct lmac *lmac; 240 u64 cfg; 241 242 if (!bgx) 243 return; 244 lmac = &bgx->lmac[lmacid]; 245 if (lmac->is_sgmii) 246 return; 247 248 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL); 249 cfg &= ~(RX_EN | TX_EN); 250 cfg |= (pfc->fc_rx ? RX_EN : 0x00); 251 cfg |= (pfc->fc_tx ? TX_EN : 0x00); 252 bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg); 253 } 254 EXPORT_SYMBOL(bgx_lmac_set_pfc); 255 256 static void bgx_sgmii_change_link_state(struct lmac *lmac) 257 { 258 struct bgx *bgx = lmac->bgx; 259 u64 cmr_cfg; 260 u64 port_cfg = 0; 261 u64 misc_ctl = 0; 262 263 cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); 264 cmr_cfg &= ~CMR_EN; 265 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 266 267 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 268 misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); 269 270 if (lmac->link_up) { 271 misc_ctl &= ~PCS_MISC_CTL_GMX_ENO; 272 port_cfg &= ~GMI_PORT_CFG_DUPLEX; 273 port_cfg |= (lmac->last_duplex << 2); 274 } else { 275 misc_ctl |= PCS_MISC_CTL_GMX_ENO; 276 } 277 278 switch (lmac->last_speed) { 279 case 10: 280 port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ 281 port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */ 282 port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ 283 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; 284 misc_ctl |= 50; /* samp_pt */ 285 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); 286 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); 287 break; 288 case 100: 289 port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ 290 port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ 291 port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ 292 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; 293 misc_ctl |= 5; /* samp_pt */ 294 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); 295 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); 296 break; 297 case 1000: 298 port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */ 299 port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ 300 port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */ 301 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; 302 misc_ctl |= 1; /* samp_pt */ 303 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512); 304 if (lmac->last_duplex) 305 bgx_reg_write(bgx, lmac->lmacid, 306 BGX_GMP_GMI_TXX_BURST, 0); 307 else 308 bgx_reg_write(bgx, lmac->lmacid, 309 BGX_GMP_GMI_TXX_BURST, 8192); 310 break; 311 default: 312 break; 313 } 314 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); 315 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); 316 317 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 318 319 /* Re-enable lmac */ 320 cmr_cfg |= CMR_EN; 321 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 322 323 if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) 324 xcv_setup_link(lmac->link_up, lmac->last_speed); 325 } 326 327 static void bgx_lmac_handler(struct net_device *netdev) 328 { 329 struct lmac *lmac = container_of(netdev, struct lmac, netdev); 330 struct phy_device *phydev; 331 int link_changed = 0; 332 333 if (!lmac) 334 return; 335 336 phydev = lmac->phydev; 337 338 if (!phydev->link && lmac->last_link) 339 link_changed = -1; 340 341 if (phydev->link && 342 (lmac->last_duplex != phydev->duplex || 343 lmac->last_link != phydev->link || 344 lmac->last_speed != phydev->speed)) { 345 link_changed = 1; 346 } 347 348 lmac->last_link = phydev->link; 349 lmac->last_speed = phydev->speed; 350 lmac->last_duplex = phydev->duplex; 351 352 if (!link_changed) 353 return; 354 355 if (link_changed > 0) 356 lmac->link_up = true; 357 else 358 lmac->link_up = false; 359 360 if (lmac->is_sgmii) 361 bgx_sgmii_change_link_state(lmac); 362 else 363 bgx_xaui_check_link(lmac); 364 } 365 366 u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) 367 { 368 struct bgx *bgx; 369 370 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 371 if (!bgx) 372 return 0; 373 374 if (idx > 8) 375 lmac = 0; 376 return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)); 377 } 378 EXPORT_SYMBOL(bgx_get_rx_stats); 379 380 u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) 381 { 382 struct bgx *bgx; 383 384 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 385 if (!bgx) 386 return 0; 387 388 return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)); 389 } 390 EXPORT_SYMBOL(bgx_get_tx_stats); 391 392 static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) 393 { 394 u64 offset; 395 396 while (bgx->lmac[lmac].dmac > 0) { 397 offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) + 398 (lmac * MAX_DMAC_PER_LMAC * sizeof(u64)); 399 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0); 400 bgx->lmac[lmac].dmac--; 401 } 402 } 403 404 /* Configure BGX LMAC in internal loopback mode */ 405 void bgx_lmac_internal_loopback(int node, int bgx_idx, 406 int lmac_idx, bool enable) 407 { 408 struct bgx *bgx; 409 struct lmac *lmac; 410 u64 cfg; 411 412 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 413 if (!bgx) 414 return; 415 416 lmac = &bgx->lmac[lmac_idx]; 417 if (lmac->is_sgmii) { 418 cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL); 419 if (enable) 420 cfg |= PCS_MRX_CTL_LOOPBACK1; 421 else 422 cfg &= ~PCS_MRX_CTL_LOOPBACK1; 423 bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg); 424 } else { 425 cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1); 426 if (enable) 427 cfg |= SPU_CTL_LOOPBACK; 428 else 429 cfg &= ~SPU_CTL_LOOPBACK; 430 bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg); 431 } 432 } 433 EXPORT_SYMBOL(bgx_lmac_internal_loopback); 434 435 static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) 436 { 437 int lmacid = lmac->lmacid; 438 u64 cfg; 439 440 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); 441 /* max packet size */ 442 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE); 443 444 /* Disable frame alignment if using preamble */ 445 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); 446 if (cfg & 1) 447 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0); 448 449 /* Enable lmac */ 450 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); 451 452 /* PCS reset */ 453 bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET); 454 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, 455 PCS_MRX_CTL_RESET, true)) { 456 dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n"); 457 return -1; 458 } 459 460 /* power down, reset autoneg, autoneg enable */ 461 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); 462 cfg &= ~PCS_MRX_CTL_PWR_DN; 463 cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); 464 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); 465 466 if (lmac->lmac_type == BGX_MODE_QSGMII) { 467 /* Disable disparity check for QSGMII */ 468 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL); 469 cfg &= ~PCS_MISC_CTL_DISP_EN; 470 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg); 471 return 0; 472 } 473 474 if (lmac->lmac_type == BGX_MODE_SGMII) { 475 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, 476 PCS_MRX_STATUS_AN_CPT, false)) { 477 dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); 478 return -1; 479 } 480 } 481 482 return 0; 483 } 484 485 static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac) 486 { 487 u64 cfg; 488 int lmacid = lmac->lmacid; 489 490 /* Reset SPU */ 491 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); 492 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { 493 dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); 494 return -1; 495 } 496 497 /* Disable LMAC */ 498 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 499 cfg &= ~CMR_EN; 500 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); 501 502 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); 503 /* Set interleaved running disparity for RXAUI */ 504 if (lmac->lmac_type == BGX_MODE_RXAUI) 505 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, 506 SPU_MISC_CTL_INTLV_RDISP); 507 508 /* Clear receive packet disable */ 509 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); 510 cfg &= ~SPU_MISC_CTL_RX_DIS; 511 bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); 512 513 /* clear all interrupts */ 514 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); 515 bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg); 516 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT); 517 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg); 518 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); 519 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); 520 521 if (lmac->use_training) { 522 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); 523 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); 524 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); 525 /* training enable */ 526 bgx_reg_modify(bgx, lmacid, 527 BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN); 528 } 529 530 /* Append FCS to each packet */ 531 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); 532 533 /* Disable forward error correction */ 534 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL); 535 cfg &= ~SPU_FEC_CTL_FEC_EN; 536 bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg); 537 538 /* Disable autoneg */ 539 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); 540 cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN); 541 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); 542 543 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); 544 if (lmac->lmac_type == BGX_MODE_10G_KR) 545 cfg |= (1 << 23); 546 else if (lmac->lmac_type == BGX_MODE_40G_KR) 547 cfg |= (1 << 24); 548 else 549 cfg &= ~((1 << 23) | (1 << 24)); 550 cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12))); 551 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg); 552 553 cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL); 554 cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; 555 bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg); 556 557 /* Enable lmac */ 558 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); 559 560 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1); 561 cfg &= ~SPU_CTL_LOW_POWER; 562 bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg); 563 564 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL); 565 cfg &= ~SMU_TX_CTL_UNI_EN; 566 cfg |= SMU_TX_CTL_DIC_EN; 567 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); 568 569 /* Enable receive and transmission of pause frames */ 570 bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) | 571 BCK_EN | DRP_EN | TX_EN | RX_EN)); 572 /* Configure pause time and interval */ 573 bgx_reg_write(bgx, lmacid, 574 BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME); 575 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL); 576 cfg &= ~0xFFFFull; 577 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL, 578 cfg | (DEFAULT_PAUSE_TIME - 0x1000)); 579 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01); 580 581 /* take lmac_count into account */ 582 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); 583 /* max packet size */ 584 bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); 585 586 return 0; 587 } 588 589 static int bgx_xaui_check_link(struct lmac *lmac) 590 { 591 struct bgx *bgx = lmac->bgx; 592 int lmacid = lmac->lmacid; 593 int lmac_type = lmac->lmac_type; 594 u64 cfg; 595 596 if (lmac->use_training) { 597 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); 598 if (!(cfg & (1ull << 13))) { 599 cfg = (1ull << 13) | (1ull << 14); 600 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); 601 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); 602 cfg |= (1ull << 0); 603 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); 604 return -1; 605 } 606 } 607 608 /* wait for PCS to come out of reset */ 609 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { 610 dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); 611 return -1; 612 } 613 614 if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) || 615 (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { 616 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1, 617 SPU_BR_STATUS_BLK_LOCK, false)) { 618 dev_err(&bgx->pdev->dev, 619 "SPU_BR_STATUS_BLK_LOCK not completed\n"); 620 return -1; 621 } 622 } else { 623 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS, 624 SPU_BX_STATUS_RX_ALIGN, false)) { 625 dev_err(&bgx->pdev->dev, 626 "SPU_BX_STATUS_RX_ALIGN not completed\n"); 627 return -1; 628 } 629 } 630 631 /* Clear rcvflt bit (latching high) and read it back */ 632 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) 633 bgx_reg_modify(bgx, lmacid, 634 BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); 635 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { 636 dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); 637 if (lmac->use_training) { 638 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); 639 if (!(cfg & (1ull << 13))) { 640 cfg = (1ull << 13) | (1ull << 14); 641 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); 642 cfg = bgx_reg_read(bgx, lmacid, 643 BGX_SPUX_BR_PMD_CRTL); 644 cfg |= (1ull << 0); 645 bgx_reg_write(bgx, lmacid, 646 BGX_SPUX_BR_PMD_CRTL, cfg); 647 return -1; 648 } 649 } 650 return -1; 651 } 652 653 /* Wait for BGX RX to be idle */ 654 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) { 655 dev_err(&bgx->pdev->dev, "SMU RX not idle\n"); 656 return -1; 657 } 658 659 /* Wait for BGX TX to be idle */ 660 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) { 661 dev_err(&bgx->pdev->dev, "SMU TX not idle\n"); 662 return -1; 663 } 664 665 /* Check for MAC RX faults */ 666 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL); 667 /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */ 668 cfg &= SMU_RX_CTL_STATUS; 669 if (!cfg) 670 return 0; 671 672 /* Rx local/remote fault seen. 673 * Do lmac reinit to see if condition recovers 674 */ 675 bgx_lmac_xaui_init(bgx, lmac); 676 677 return -1; 678 } 679 680 static void bgx_poll_for_link(struct work_struct *work) 681 { 682 struct lmac *lmac; 683 u64 spu_link, smu_link; 684 685 lmac = container_of(work, struct lmac, dwork.work); 686 687 /* Receive link is latching low. Force it high and verify it */ 688 bgx_reg_modify(lmac->bgx, lmac->lmacid, 689 BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); 690 bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, 691 SPU_STATUS1_RCV_LNK, false); 692 693 spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1); 694 smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL); 695 696 if ((spu_link & SPU_STATUS1_RCV_LNK) && 697 !(smu_link & SMU_RX_CTL_STATUS)) { 698 lmac->link_up = 1; 699 if (lmac->lmac_type == BGX_MODE_XLAUI) 700 lmac->last_speed = 40000; 701 else 702 lmac->last_speed = 10000; 703 lmac->last_duplex = 1; 704 } else { 705 lmac->link_up = 0; 706 lmac->last_speed = SPEED_UNKNOWN; 707 lmac->last_duplex = DUPLEX_UNKNOWN; 708 } 709 710 if (lmac->last_link != lmac->link_up) { 711 if (lmac->link_up) { 712 if (bgx_xaui_check_link(lmac)) { 713 /* Errors, clear link_up state */ 714 lmac->link_up = 0; 715 lmac->last_speed = SPEED_UNKNOWN; 716 lmac->last_duplex = DUPLEX_UNKNOWN; 717 } 718 } 719 lmac->last_link = lmac->link_up; 720 } 721 722 queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); 723 } 724 725 static int phy_interface_mode(u8 lmac_type) 726 { 727 if (lmac_type == BGX_MODE_QSGMII) 728 return PHY_INTERFACE_MODE_QSGMII; 729 if (lmac_type == BGX_MODE_RGMII) 730 return PHY_INTERFACE_MODE_RGMII; 731 732 return PHY_INTERFACE_MODE_SGMII; 733 } 734 735 static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) 736 { 737 struct lmac *lmac; 738 u64 cfg; 739 740 lmac = &bgx->lmac[lmacid]; 741 lmac->bgx = bgx; 742 743 if ((lmac->lmac_type == BGX_MODE_SGMII) || 744 (lmac->lmac_type == BGX_MODE_QSGMII) || 745 (lmac->lmac_type == BGX_MODE_RGMII)) { 746 lmac->is_sgmii = 1; 747 if (bgx_lmac_sgmii_init(bgx, lmac)) 748 return -1; 749 } else { 750 lmac->is_sgmii = 0; 751 if (bgx_lmac_xaui_init(bgx, lmac)) 752 return -1; 753 } 754 755 if (lmac->is_sgmii) { 756 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); 757 cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ 758 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg); 759 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1); 760 } else { 761 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND); 762 cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ 763 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg); 764 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4); 765 } 766 767 /* Enable lmac */ 768 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); 769 770 /* Restore default cfg, incase low level firmware changed it */ 771 bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); 772 773 if ((lmac->lmac_type != BGX_MODE_XFI) && 774 (lmac->lmac_type != BGX_MODE_XLAUI) && 775 (lmac->lmac_type != BGX_MODE_40G_KR) && 776 (lmac->lmac_type != BGX_MODE_10G_KR)) { 777 if (!lmac->phydev) 778 return -ENODEV; 779 780 lmac->phydev->dev_flags = 0; 781 782 if (phy_connect_direct(&lmac->netdev, lmac->phydev, 783 bgx_lmac_handler, 784 phy_interface_mode(lmac->lmac_type))) 785 return -ENODEV; 786 787 phy_start_aneg(lmac->phydev); 788 } else { 789 lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | 790 WQ_MEM_RECLAIM, 1); 791 if (!lmac->check_link) 792 return -ENOMEM; 793 INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); 794 queue_delayed_work(lmac->check_link, &lmac->dwork, 0); 795 } 796 797 return 0; 798 } 799 800 static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) 801 { 802 struct lmac *lmac; 803 u64 cfg; 804 805 lmac = &bgx->lmac[lmacid]; 806 if (lmac->check_link) { 807 /* Destroy work queue */ 808 cancel_delayed_work_sync(&lmac->dwork); 809 destroy_workqueue(lmac->check_link); 810 } 811 812 /* Disable packet reception */ 813 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 814 cfg &= ~CMR_PKT_RX_EN; 815 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); 816 817 /* Give chance for Rx/Tx FIFO to get drained */ 818 bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true); 819 bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true); 820 821 /* Disable packet transmission */ 822 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 823 cfg &= ~CMR_PKT_TX_EN; 824 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); 825 826 /* Disable serdes lanes */ 827 if (!lmac->is_sgmii) 828 bgx_reg_modify(bgx, lmacid, 829 BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); 830 else 831 bgx_reg_modify(bgx, lmacid, 832 BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN); 833 834 /* Disable LMAC */ 835 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 836 cfg &= ~CMR_EN; 837 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); 838 839 bgx_flush_dmac_addrs(bgx, lmacid); 840 841 if ((lmac->lmac_type != BGX_MODE_XFI) && 842 (lmac->lmac_type != BGX_MODE_XLAUI) && 843 (lmac->lmac_type != BGX_MODE_40G_KR) && 844 (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) 845 phy_disconnect(lmac->phydev); 846 847 lmac->phydev = NULL; 848 } 849 850 static void bgx_init_hw(struct bgx *bgx) 851 { 852 int i; 853 struct lmac *lmac; 854 855 bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); 856 if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) 857 dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id); 858 859 /* Set lmac type and lane2serdes mapping */ 860 for (i = 0; i < bgx->lmac_count; i++) { 861 lmac = &bgx->lmac[i]; 862 bgx_reg_write(bgx, i, BGX_CMRX_CFG, 863 (lmac->lmac_type << 8) | lmac->lane_to_sds); 864 bgx->lmac[i].lmacid_bd = lmac_count; 865 lmac_count++; 866 } 867 868 bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count); 869 bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count); 870 871 /* Set the backpressure AND mask */ 872 for (i = 0; i < bgx->lmac_count; i++) 873 bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND, 874 ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) << 875 (i * MAX_BGX_CHANS_PER_LMAC)); 876 877 /* Disable all MAC filtering */ 878 for (i = 0; i < RX_DMAC_COUNT; i++) 879 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00); 880 881 /* Disable MAC steering (NCSI traffic) */ 882 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) 883 bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); 884 } 885 886 static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) 887 { 888 return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF); 889 } 890 891 static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) 892 { 893 struct device *dev = &bgx->pdev->dev; 894 struct lmac *lmac; 895 char str[20]; 896 u8 dlm; 897 898 if (lmacid > bgx->max_lmac) 899 return; 900 901 lmac = &bgx->lmac[lmacid]; 902 dlm = (lmacid / 2) + (bgx->bgx_id * 2); 903 if (!bgx->is_dlm) 904 sprintf(str, "BGX%d QLM mode", bgx->bgx_id); 905 else 906 sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm); 907 908 switch (lmac->lmac_type) { 909 case BGX_MODE_SGMII: 910 dev_info(dev, "%s: SGMII\n", (char *)str); 911 break; 912 case BGX_MODE_XAUI: 913 dev_info(dev, "%s: XAUI\n", (char *)str); 914 break; 915 case BGX_MODE_RXAUI: 916 dev_info(dev, "%s: RXAUI\n", (char *)str); 917 break; 918 case BGX_MODE_XFI: 919 if (!lmac->use_training) 920 dev_info(dev, "%s: XFI\n", (char *)str); 921 else 922 dev_info(dev, "%s: 10G_KR\n", (char *)str); 923 break; 924 case BGX_MODE_XLAUI: 925 if (!lmac->use_training) 926 dev_info(dev, "%s: XLAUI\n", (char *)str); 927 else 928 dev_info(dev, "%s: 40G_KR4\n", (char *)str); 929 break; 930 case BGX_MODE_QSGMII: 931 if ((lmacid == 0) && 932 (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid)) 933 return; 934 if ((lmacid == 2) && 935 (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid)) 936 return; 937 dev_info(dev, "%s: QSGMII\n", (char *)str); 938 break; 939 case BGX_MODE_RGMII: 940 dev_info(dev, "%s: RGMII\n", (char *)str); 941 break; 942 case BGX_MODE_INVALID: 943 /* Nothing to do */ 944 break; 945 } 946 } 947 948 static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac) 949 { 950 switch (lmac->lmac_type) { 951 case BGX_MODE_SGMII: 952 case BGX_MODE_XFI: 953 lmac->lane_to_sds = lmac->lmacid; 954 break; 955 case BGX_MODE_XAUI: 956 case BGX_MODE_XLAUI: 957 case BGX_MODE_RGMII: 958 lmac->lane_to_sds = 0xE4; 959 break; 960 case BGX_MODE_RXAUI: 961 lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4; 962 break; 963 case BGX_MODE_QSGMII: 964 /* There is no way to determine if DLM0/2 is QSGMII or 965 * DLM1/3 is configured to QSGMII as bootloader will 966 * configure all LMACs, so take whatever is configured 967 * by low level firmware. 968 */ 969 lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac); 970 break; 971 default: 972 lmac->lane_to_sds = 0; 973 break; 974 } 975 } 976 977 static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) 978 { 979 if ((lmac->lmac_type != BGX_MODE_10G_KR) && 980 (lmac->lmac_type != BGX_MODE_40G_KR)) { 981 lmac->use_training = 0; 982 return; 983 } 984 985 lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) & 986 SPU_PMD_CRTL_TRAIN_EN; 987 } 988 989 static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) 990 { 991 struct lmac *lmac; 992 struct lmac *olmac; 993 u64 cmr_cfg; 994 u8 lmac_type; 995 u8 lane_to_sds; 996 997 lmac = &bgx->lmac[idx]; 998 999 if (!bgx->is_dlm || bgx->is_rgx) { 1000 /* Read LMAC0 type to figure out QLM mode 1001 * This is configured by low level firmware 1002 */ 1003 cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); 1004 lmac->lmac_type = (cmr_cfg >> 8) & 0x07; 1005 if (bgx->is_rgx) 1006 lmac->lmac_type = BGX_MODE_RGMII; 1007 lmac_set_training(bgx, lmac, 0); 1008 lmac_set_lane2sds(bgx, lmac); 1009 return; 1010 } 1011 1012 /* On 81xx BGX can be split across 2 DLMs 1013 * firmware programs lmac_type of LMAC0 and LMAC2 1014 */ 1015 if ((idx == 0) || (idx == 2)) { 1016 cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); 1017 lmac_type = (u8)((cmr_cfg >> 8) & 0x07); 1018 lane_to_sds = (u8)(cmr_cfg & 0xFF); 1019 /* Check if config is not reset value */ 1020 if ((lmac_type == 0) && (lane_to_sds == 0xE4)) 1021 lmac->lmac_type = BGX_MODE_INVALID; 1022 else 1023 lmac->lmac_type = lmac_type; 1024 lmac_set_training(bgx, lmac, lmac->lmacid); 1025 lmac_set_lane2sds(bgx, lmac); 1026 1027 olmac = &bgx->lmac[idx + 1]; 1028 /* Check if other LMAC on the same DLM is already configured by 1029 * firmware, if so use the same config or else set as same, as 1030 * that of LMAC 0/2. 1031 * This check is needed as on 80xx only one lane of each of the 1032 * DLM of BGX0 is used, so have to rely on firmware for 1033 * distingushing 80xx from 81xx. 1034 */ 1035 cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG); 1036 lmac_type = (u8)((cmr_cfg >> 8) & 0x07); 1037 lane_to_sds = (u8)(cmr_cfg & 0xFF); 1038 if ((lmac_type == 0) && (lane_to_sds == 0xE4)) { 1039 olmac->lmac_type = lmac->lmac_type; 1040 lmac_set_lane2sds(bgx, olmac); 1041 } else { 1042 olmac->lmac_type = lmac_type; 1043 olmac->lane_to_sds = lane_to_sds; 1044 } 1045 lmac_set_training(bgx, olmac, olmac->lmacid); 1046 } 1047 } 1048 1049 static bool is_dlm0_in_bgx_mode(struct bgx *bgx) 1050 { 1051 struct lmac *lmac; 1052 1053 if (!bgx->is_dlm) 1054 return true; 1055 1056 lmac = &bgx->lmac[0]; 1057 if (lmac->lmac_type == BGX_MODE_INVALID) 1058 return false; 1059 1060 return true; 1061 } 1062 1063 static void bgx_get_qlm_mode(struct bgx *bgx) 1064 { 1065 struct lmac *lmac; 1066 struct lmac *lmac01; 1067 struct lmac *lmac23; 1068 u8 idx; 1069 1070 /* Init all LMAC's type to invalid */ 1071 for (idx = 0; idx < bgx->max_lmac; idx++) { 1072 lmac = &bgx->lmac[idx]; 1073 lmac->lmacid = idx; 1074 lmac->lmac_type = BGX_MODE_INVALID; 1075 lmac->use_training = false; 1076 } 1077 1078 /* It is assumed that low level firmware sets this value */ 1079 bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; 1080 if (bgx->lmac_count > bgx->max_lmac) 1081 bgx->lmac_count = bgx->max_lmac; 1082 1083 for (idx = 0; idx < bgx->max_lmac; idx++) 1084 bgx_set_lmac_config(bgx, idx); 1085 1086 if (!bgx->is_dlm || bgx->is_rgx) { 1087 bgx_print_qlm_mode(bgx, 0); 1088 return; 1089 } 1090 1091 if (bgx->lmac_count) { 1092 bgx_print_qlm_mode(bgx, 0); 1093 bgx_print_qlm_mode(bgx, 2); 1094 } 1095 1096 /* If DLM0 is not in BGX mode then LMAC0/1 have 1097 * to be configured with serdes lanes of DLM1 1098 */ 1099 if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2)) 1100 return; 1101 for (idx = 0; idx < bgx->lmac_count; idx++) { 1102 lmac01 = &bgx->lmac[idx]; 1103 lmac23 = &bgx->lmac[idx + 2]; 1104 lmac01->lmac_type = lmac23->lmac_type; 1105 lmac01->lane_to_sds = lmac23->lane_to_sds; 1106 } 1107 } 1108 1109 #ifdef CONFIG_ACPI 1110 1111 static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, 1112 u8 *dst) 1113 { 1114 u8 mac[ETH_ALEN]; 1115 int ret; 1116 1117 ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), 1118 "mac-address", mac, ETH_ALEN); 1119 if (ret) 1120 goto out; 1121 1122 if (!is_valid_ether_addr(mac)) { 1123 dev_err(dev, "MAC address invalid: %pM\n", mac); 1124 ret = -EINVAL; 1125 goto out; 1126 } 1127 1128 dev_info(dev, "MAC address set to: %pM\n", mac); 1129 1130 memcpy(dst, mac, ETH_ALEN); 1131 out: 1132 return ret; 1133 } 1134 1135 /* Currently only sets the MAC address. */ 1136 static acpi_status bgx_acpi_register_phy(acpi_handle handle, 1137 u32 lvl, void *context, void **rv) 1138 { 1139 struct bgx *bgx = context; 1140 struct device *dev = &bgx->pdev->dev; 1141 struct acpi_device *adev; 1142 1143 if (acpi_bus_get_device(handle, &adev)) 1144 goto out; 1145 1146 acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); 1147 1148 SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); 1149 1150 bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; 1151 out: 1152 bgx->lmac_count++; 1153 return AE_OK; 1154 } 1155 1156 static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, 1157 void *context, void **ret_val) 1158 { 1159 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 1160 struct bgx *bgx = context; 1161 char bgx_sel[5]; 1162 1163 snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id); 1164 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) { 1165 pr_warn("Invalid link device\n"); 1166 return AE_OK; 1167 } 1168 1169 if (strncmp(string.pointer, bgx_sel, 4)) 1170 return AE_OK; 1171 1172 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1173 bgx_acpi_register_phy, NULL, bgx, NULL); 1174 1175 kfree(string.pointer); 1176 return AE_CTRL_TERMINATE; 1177 } 1178 1179 static int bgx_init_acpi_phy(struct bgx *bgx) 1180 { 1181 acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL); 1182 return 0; 1183 } 1184 1185 #else 1186 1187 static int bgx_init_acpi_phy(struct bgx *bgx) 1188 { 1189 return -ENODEV; 1190 } 1191 1192 #endif /* CONFIG_ACPI */ 1193 1194 #if IS_ENABLED(CONFIG_OF_MDIO) 1195 1196 static int bgx_init_of_phy(struct bgx *bgx) 1197 { 1198 struct fwnode_handle *fwn; 1199 struct device_node *node = NULL; 1200 u8 lmac = 0; 1201 1202 device_for_each_child_node(&bgx->pdev->dev, fwn) { 1203 struct phy_device *pd; 1204 struct device_node *phy_np; 1205 const char *mac; 1206 1207 /* Should always be an OF node. But if it is not, we 1208 * cannot handle it, so exit the loop. 1209 */ 1210 node = to_of_node(fwn); 1211 if (!node) 1212 break; 1213 1214 mac = of_get_mac_address(node); 1215 if (mac) 1216 ether_addr_copy(bgx->lmac[lmac].mac, mac); 1217 1218 SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); 1219 bgx->lmac[lmac].lmacid = lmac; 1220 1221 phy_np = of_parse_phandle(node, "phy-handle", 0); 1222 /* If there is no phy or defective firmware presents 1223 * this cortina phy, for which there is no driver 1224 * support, ignore it. 1225 */ 1226 if (phy_np && 1227 !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) { 1228 /* Wait until the phy drivers are available */ 1229 pd = of_phy_find_device(phy_np); 1230 if (!pd) 1231 goto defer; 1232 bgx->lmac[lmac].phydev = pd; 1233 } 1234 1235 lmac++; 1236 if (lmac == bgx->max_lmac) { 1237 of_node_put(node); 1238 break; 1239 } 1240 } 1241 return 0; 1242 1243 defer: 1244 /* We are bailing out, try not to leak device reference counts 1245 * for phy devices we may have already found. 1246 */ 1247 while (lmac) { 1248 if (bgx->lmac[lmac].phydev) { 1249 put_device(&bgx->lmac[lmac].phydev->mdio.dev); 1250 bgx->lmac[lmac].phydev = NULL; 1251 } 1252 lmac--; 1253 } 1254 of_node_put(node); 1255 return -EPROBE_DEFER; 1256 } 1257 1258 #else 1259 1260 static int bgx_init_of_phy(struct bgx *bgx) 1261 { 1262 return -ENODEV; 1263 } 1264 1265 #endif /* CONFIG_OF_MDIO */ 1266 1267 static int bgx_init_phy(struct bgx *bgx) 1268 { 1269 if (!acpi_disabled) 1270 return bgx_init_acpi_phy(bgx); 1271 1272 return bgx_init_of_phy(bgx); 1273 } 1274 1275 static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1276 { 1277 int err; 1278 struct device *dev = &pdev->dev; 1279 struct bgx *bgx = NULL; 1280 u8 lmac; 1281 u16 sdevid; 1282 1283 bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); 1284 if (!bgx) 1285 return -ENOMEM; 1286 bgx->pdev = pdev; 1287 1288 pci_set_drvdata(pdev, bgx); 1289 1290 err = pci_enable_device(pdev); 1291 if (err) { 1292 dev_err(dev, "Failed to enable PCI device\n"); 1293 pci_set_drvdata(pdev, NULL); 1294 return err; 1295 } 1296 1297 err = pci_request_regions(pdev, DRV_NAME); 1298 if (err) { 1299 dev_err(dev, "PCI request regions failed 0x%x\n", err); 1300 goto err_disable_device; 1301 } 1302 1303 /* MAP configuration registers */ 1304 bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); 1305 if (!bgx->reg_base) { 1306 dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n"); 1307 err = -ENOMEM; 1308 goto err_release_regions; 1309 } 1310 1311 pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); 1312 if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { 1313 bgx->bgx_id = (pci_resource_start(pdev, 1314 PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; 1315 bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; 1316 bgx->max_lmac = MAX_LMAC_PER_BGX; 1317 bgx_vnic[bgx->bgx_id] = bgx; 1318 } else { 1319 bgx->is_rgx = true; 1320 bgx->max_lmac = 1; 1321 bgx->bgx_id = MAX_BGX_PER_CN81XX - 1; 1322 bgx_vnic[bgx->bgx_id] = bgx; 1323 xcv_init_hw(); 1324 } 1325 1326 /* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one 1327 * BGX i.e BGX2 can be split across 2 DLMs. 1328 */ 1329 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); 1330 if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) || 1331 ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2))) 1332 bgx->is_dlm = true; 1333 1334 bgx_get_qlm_mode(bgx); 1335 1336 err = bgx_init_phy(bgx); 1337 if (err) 1338 goto err_enable; 1339 1340 bgx_init_hw(bgx); 1341 1342 /* Enable all LMACs */ 1343 for (lmac = 0; lmac < bgx->lmac_count; lmac++) { 1344 err = bgx_lmac_enable(bgx, lmac); 1345 if (err) { 1346 dev_err(dev, "BGX%d failed to enable lmac%d\n", 1347 bgx->bgx_id, lmac); 1348 while (lmac) 1349 bgx_lmac_disable(bgx, --lmac); 1350 goto err_enable; 1351 } 1352 } 1353 1354 return 0; 1355 1356 err_enable: 1357 bgx_vnic[bgx->bgx_id] = NULL; 1358 err_release_regions: 1359 pci_release_regions(pdev); 1360 err_disable_device: 1361 pci_disable_device(pdev); 1362 pci_set_drvdata(pdev, NULL); 1363 return err; 1364 } 1365 1366 static void bgx_remove(struct pci_dev *pdev) 1367 { 1368 struct bgx *bgx = pci_get_drvdata(pdev); 1369 u8 lmac; 1370 1371 /* Disable all LMACs */ 1372 for (lmac = 0; lmac < bgx->lmac_count; lmac++) 1373 bgx_lmac_disable(bgx, lmac); 1374 1375 bgx_vnic[bgx->bgx_id] = NULL; 1376 pci_release_regions(pdev); 1377 pci_disable_device(pdev); 1378 pci_set_drvdata(pdev, NULL); 1379 } 1380 1381 static struct pci_driver bgx_driver = { 1382 .name = DRV_NAME, 1383 .id_table = bgx_id_table, 1384 .probe = bgx_probe, 1385 .remove = bgx_remove, 1386 }; 1387 1388 static int __init bgx_init_module(void) 1389 { 1390 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); 1391 1392 return pci_register_driver(&bgx_driver); 1393 } 1394 1395 static void __exit bgx_cleanup_module(void) 1396 { 1397 pci_unregister_driver(&bgx_driver); 1398 } 1399 1400 module_init(bgx_init_module); 1401 module_exit(bgx_cleanup_module); 1402