/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8			bgx_id;
	u8			qlm_mode;
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	int			lmac_count;
	int			lmac_type;
	int			lane_to_sds;
	int			use_training;
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}

static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}

/* Return a bitmap of the BGX blocks present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);

/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

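/* Propagate the PHY-reported SGMII link state to the hardware: the LMAC is
 * disabled, GMI port config and PCS misc control are updated for the new
 * speed/duplex, and the LMAC is then re-enabled.
 */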
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev = lmac->phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);

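/* Clear all DMAC CAM filter entries that were programmed for this LMAC */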
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			 (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);

static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
		return -1;
	}

	return 0;
}

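/* Bring up the SPU/SMU blocks for the XAUI/RXAUI/XFI/XLAUI/10G-KR/40G-KR
 * LMAC types: reset the SPU, optionally enable link training, disable FEC
 * and autonegotiation, and set the TX threshold and max frame size.
 */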
static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	u64 cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}

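/* Check and bring up the receive link on a XAUI/RXAUI/XFI/XLAUI/KR LMAC.
 * Returns 0 once block lock/alignment is achieved, no receive fault is
 * latched and SMU RX/TX are idle; returns -1 (restarting link training
 * where applicable) otherwise.
 */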
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
			 SMU_RX_CTL_STATUS, true)) {
		dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault\n");
		return -1;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
			 SPU_STATUS1_RCV_LNK, false)) {
		dev_err(&bgx->pdev->dev, "SPU receive link down\n");
		return -1;
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return 0;
}

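/* Delayed-work handler that polls the SPU receive link state for LMAC types
 * without an attached PHY; reschedules itself every 2 seconds.
 */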
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 link;

	lmac = container_of(work, struct lmac, dwork.work);

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev)
			return -ENODEV;

		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       PHY_INTERFACE_MODE_SGMII))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
	} else {
		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
						   WQ_MEM_RECLAIM, 1);
		if (!lmac->check_link)
			return -ENOMEM;
		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
	}

	return 0;
}

static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cmrx_cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work(&lmac->dwork);
		flush_workqueue(lmac->check_link);
		destroy_workqueue(lmac->check_link);
	}

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}

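/* Derive LMAC count, LMAC type and lane-to-SerDes mapping from the QLM mode */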
static void bgx_set_num_ports(struct bgx *bgx)
{
	u64 lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/* Check if low level firmware has programmed the LMAC count
	 * based on board type; if so, use that instead of the default
	 * static values.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

static void bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
				      (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct device *dev = &bgx->pdev->dev;
	int lmac_type;
	int train_en;

	/* Read LMAC0 type to figure out QLM mode
	 * This is configured by low level firmware
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		ret = -EINVAL;
		goto out;
	}

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);

	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
out:
	bgx->lmac_count++;
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct device_node *np;
	struct device_node *np_child;
	u8 lmac = 0;
	char bgx_sel[5];
	const char *mac;

	/* Get BGX node from DT */
	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
	np = of_find_node_by_name(NULL, bgx_sel);
	if (!np)
		return -ENODEV;

	for_each_child_of_node(np, np_child) {
		struct device_node *phy_np = of_parse_phandle(np_child,
							      "phy-handle", 0);
		if (!phy_np)
			continue;
		bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);

		mac = of_get_mac_address(np_child);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;
		lmac++;
		if (lmac == MAX_LMAC_PER_BGX) {
			of_node_put(np_child);
			break;
		}
	}
	return 0;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}

static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}
	bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
	bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);