/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8			bgx_id;
	u8			qlm_mode;
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	int			lmac_count;
	int			lmac_type;
	int			lane_to_sds;
	int			use_training;
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs.
 * Each LMAC's CSR block sits at a 1MB stride within the BGX BAR,
 * hence the (lmac << 20) offset added to the register address.
 */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}

/* Poll a register until the mask bits are set (or cleared, when 'zero' is
 * true). Returns 0 when the condition is met, 1 on timeout.
 */
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}

/* Return the map of BGXs present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);

/* Return number of LMACs configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	u64 cfg;

	if (!bgx)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);

static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid,
				BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);

static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);

static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* Power up, reset autoneg, enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
		return -1;
	}

	return 0;
}

static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	u64 cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}

static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);

	return -1;
}

static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev)
			return -ENODEV;

		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       PHY_INTERFACE_MODE_SGMII))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
	} else {
		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
						   WQ_MEM_RECLAIM, 1);
		if (!lmac->check_link)
			return -ENOMEM;
		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
	}

	return 0;
}

static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}

static void bgx_set_num_ports(struct bgx *bgx)
{
	u64 lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/* Check if low level firmware has programmed the LMAC count
	 * based on board type; if so, use that value instead of
	 * the default static values above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

static void bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
				      (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct device *dev = &bgx->pdev->dev;
	int lmac_type;
	int train_en;

	/* Read LMAC0 type to figure out QLM mode
	 * This is configured by low level firmware
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
			SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		ret = -EINVAL;
		goto out;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);

	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
out:
	bgx->lmac_count++;
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;
		const char *mac;

		/* Should always be an OF node. But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		mac = of_get_mac_address(node);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no phy or defective firmware presents
		 * this cortina phy, for which there is no driver
		 * support, ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == MAX_LMAC_PER_BGX) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out, try not to leak device reference counts
	 * for phy devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}

static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}
	bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
	bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);