// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

/* Per-device glue state, hung off plat_stmmacenet_data::bsp_priv. */
struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;	/* ART frequency scaling factor for cross-timestamps */
	bool is_pse;		/* true on PSE (Programmable Services Engine) MACs */
};

/* This struct is used to associate PCI Function of MAC controller on a board,
 * discovered via DMI, with the address of PHY connected to the MAC. The
 * negative value of the address means that MAC controller is not connected
 * with PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

/* Per-PCI-ID setup hook, stashed in pci_device_id::driver_data. */
struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

/* Look up the PHY address wired to this PCI function on a DMI-identified
 * board. Returns the PHY address, or -ENODEV when the board does not match
 * the DMI list or the function has no entry in the board's table.
 */
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

/* Poll a SerDes register over the ad-hoc MDIO address until the bits in
 * @mask read back as @val. Retries 10 times with POLL_DELAY_US between
 * reads; returns 0 on match, -ETIMEDOUT otherwise.
 */
static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}

/* Power up the SGMII SerDes: program rate/PCLK, request the PLL clock,
 * assert lane reset, then move to power state P0 — confirming each step
 * by polling SERDES_GSR0. Returns 0 on success or -ETIMEDOUT from a
 * failed poll. No-op when no ad-hoc MDIO address is configured.
 */
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Set the serdes rate and the PCLK rate */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR0);

	data &= ~SERDES_RATE_MASK;
	data &= ~SERDES_PCLK_MASK;

	/* 2.5G uses the PCIe Gen2 rate with a 37.5MHz PCLK; 1G uses
	 * Gen1 with a 70MHz PCLK.
	 */
	if (priv->plat->max_speed == 2500)
		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
	else
		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}

/* Power down the SGMII SerDes: reverse of intel_serdes_powerup().
 * Gates the PSE Rx clock, moves to power state P3, releases the PLL
 * clock request and de-asserts lane reset, polling SERDES_GSR0 after
 * each step. Errors are logged and abort the remaining steps.
 */
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

/* Read the SerDes link-mode strap and adjust plat data accordingly:
 * 2.5G operation uses 2500BASE-X with in-band AN disabled, otherwise
 * fall back to SGMII with in-band AN enabled.
 */
static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Determine the link speed mode: 2.5Gbps/1Gbps */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR);

	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
	    SERDES_LINK_MODE_2G5) {
		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
		priv->plat->max_speed = 2500;
		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
		priv->plat->mdio_bus_data->xpcs_an_inband = false;
	} else {
		priv->plat->max_speed = 1000;
		priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
		priv->plat->mdio_bus_data->xpcs_an_inband = true;
	}
}

/* Program PTP Clock Frequency for different variant of
 * Intel mGBE that has slightly different GPO mapping
 */
static void intel_mgbe_ptp_clk_freq_config(void *npriv)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	/* Both variants select a 200MHz PTP clock, but PSE and PCH use
	 * different GPO bit fields in GMAC_GPIO_STATUS.
	 */
	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |=
			      PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}

/* Read the 64-bit ART timestamp latched by the PMC, in 16-bit pieces
 * over the ad-hoc MDIO address, most significant word (VALUE3) first.
 */
static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
			u64 *art_time)
{
	u64 ns;

	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);

	*art_time = ns;
}

/* Capture a correlated (PTP device time, ART system counter) pair.
 * Arms the configured auxiliary snapshot, triggers it by toggling GPO1,
 * then drains the snapshot FIFO so that the last (freshest) entry wins.
 * Returns 0 on success, -EOPNOTSUPP when the CPU lacks ART, -EBUSY when
 * external snapshot is active, -EINVAL for a bad snapshot index, or the
 * poll-timeout error.
 */
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int ret;
	u32 v;
	int i;

	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Both internal crosstimestamping and external triggered event
	 * timestamping cannot be run concurrently.
	 */
	if (priv->plat->ext_snapshot_en)
		return -EBUSY;

	mutex_lock(&priv->aux_ts_lock);
	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);
	/* Release the mutex */
	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger Internal snapshot signal
	 * Create a rising edge by just toggle the GPO1 to low
	 * and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Poll for time sync operation done */
	ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
				 (v & GMAC_INT_TSIE), 100, 10000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: Wait for time sync operation timeout\n", __func__);
		return ret;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the FIFO last segment */
	for (i = 0; i < num_snapshot; i++) {
		read_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		read_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		*system =
convert_art_to_tsc(art_time); 392 } 393 394 system->cycles *= intel_priv->crossts_adj; 395 396 return 0; 397 } 398 399 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv, 400 int base) 401 { 402 if (boot_cpu_has(X86_FEATURE_ART)) { 403 unsigned int art_freq; 404 405 /* On systems that support ART, ART frequency can be obtained 406 * from ECX register of CPUID leaf (0x15). 407 */ 408 art_freq = cpuid_ecx(ART_CPUID_LEAF); 409 do_div(art_freq, base); 410 intel_priv->crossts_adj = art_freq; 411 } 412 } 413 414 static void common_default_data(struct plat_stmmacenet_data *plat) 415 { 416 plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ 417 plat->has_gmac = 1; 418 plat->force_sf_dma_mode = 1; 419 420 plat->mdio_bus_data->needs_reset = true; 421 422 /* Set default value for multicast hash bins */ 423 plat->multicast_filter_bins = HASH_TABLE_SIZE; 424 425 /* Set default value for unicast filter entries */ 426 plat->unicast_filter_entries = 1; 427 428 /* Set the maxmtu to a default of JUMBO_LEN */ 429 plat->maxmtu = JUMBO_LEN; 430 431 /* Set default number of RX and TX queues to use */ 432 plat->tx_queues_to_use = 1; 433 plat->rx_queues_to_use = 1; 434 435 /* Disable Priority config by default */ 436 plat->tx_queues_cfg[0].use_prio = false; 437 plat->rx_queues_cfg[0].use_prio = false; 438 439 /* Disable RX queues routing by default */ 440 plat->rx_queues_cfg[0].pkt_route = 0x0; 441 } 442 443 static int intel_mgbe_common_data(struct pci_dev *pdev, 444 struct plat_stmmacenet_data *plat) 445 { 446 char clk_name[20]; 447 int ret; 448 int i; 449 450 plat->pdev = pdev; 451 plat->phy_addr = -1; 452 plat->clk_csr = 5; 453 plat->has_gmac = 0; 454 plat->has_gmac4 = 1; 455 plat->force_sf_dma_mode = 0; 456 plat->tso_en = 1; 457 458 /* Multiplying factor to the clk_eee_i clock time 459 * period to make it closer to 100 ns. 
This value 460 * should be programmed such that the clk_eee_time_period * 461 * (MULT_FACT_100NS + 1) should be within 80 ns to 120 ns 462 * clk_eee frequency is 19.2Mhz 463 * clk_eee_time_period is 52ns 464 * 52ns * (1 + 1) = 104ns 465 * MULT_FACT_100NS = 1 466 */ 467 plat->mult_fact_100ns = 1; 468 469 plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; 470 471 for (i = 0; i < plat->rx_queues_to_use; i++) { 472 plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; 473 plat->rx_queues_cfg[i].chan = i; 474 475 /* Disable Priority config by default */ 476 plat->rx_queues_cfg[i].use_prio = false; 477 478 /* Disable RX queues routing by default */ 479 plat->rx_queues_cfg[i].pkt_route = 0x0; 480 } 481 482 for (i = 0; i < plat->tx_queues_to_use; i++) { 483 plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; 484 485 /* Disable Priority config by default */ 486 plat->tx_queues_cfg[i].use_prio = false; 487 /* Default TX Q0 to use TSO and rest TXQ for TBS */ 488 if (i > 0) 489 plat->tx_queues_cfg[i].tbs_en = 1; 490 } 491 492 /* FIFO size is 4096 bytes for 1 tx/rx queue */ 493 plat->tx_fifo_size = plat->tx_queues_to_use * 4096; 494 plat->rx_fifo_size = plat->rx_queues_to_use * 4096; 495 496 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; 497 plat->tx_queues_cfg[0].weight = 0x09; 498 plat->tx_queues_cfg[1].weight = 0x0A; 499 plat->tx_queues_cfg[2].weight = 0x0B; 500 plat->tx_queues_cfg[3].weight = 0x0C; 501 plat->tx_queues_cfg[4].weight = 0x0D; 502 plat->tx_queues_cfg[5].weight = 0x0E; 503 plat->tx_queues_cfg[6].weight = 0x0F; 504 plat->tx_queues_cfg[7].weight = 0x10; 505 506 plat->dma_cfg->pbl = 32; 507 plat->dma_cfg->pblx8 = true; 508 plat->dma_cfg->fixed_burst = 0; 509 plat->dma_cfg->mixed_burst = 0; 510 plat->dma_cfg->aal = 0; 511 plat->dma_cfg->dche = true; 512 513 plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), 514 GFP_KERNEL); 515 if (!plat->axi) 516 return -ENOMEM; 517 518 plat->axi->axi_lpi_en = 0; 519 plat->axi->axi_xit_frm = 0; 520 plat->axi->axi_wr_osr_lmt = 1; 
521 plat->axi->axi_rd_osr_lmt = 1; 522 plat->axi->axi_blen[0] = 4; 523 plat->axi->axi_blen[1] = 8; 524 plat->axi->axi_blen[2] = 16; 525 526 plat->ptp_max_adj = plat->clk_ptp_rate; 527 plat->eee_usecs_rate = plat->clk_ptp_rate; 528 529 /* Set system clock */ 530 sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev)); 531 532 plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, 533 clk_name, NULL, 0, 534 plat->clk_ptp_rate); 535 536 if (IS_ERR(plat->stmmac_clk)) { 537 dev_warn(&pdev->dev, "Fail to register stmmac-clk\n"); 538 plat->stmmac_clk = NULL; 539 } 540 541 ret = clk_prepare_enable(plat->stmmac_clk); 542 if (ret) { 543 clk_unregister_fixed_rate(plat->stmmac_clk); 544 return ret; 545 } 546 547 plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config; 548 549 /* Set default value for multicast hash bins */ 550 plat->multicast_filter_bins = HASH_TABLE_SIZE; 551 552 /* Set default value for unicast filter entries */ 553 plat->unicast_filter_entries = 1; 554 555 /* Set the maxmtu to a default of JUMBO_LEN */ 556 plat->maxmtu = JUMBO_LEN; 557 558 plat->vlan_fail_q_en = true; 559 560 /* Use the last Rx queue */ 561 plat->vlan_fail_q = plat->rx_queues_to_use - 1; 562 563 /* Intel mgbe SGMII interface uses pcs-xcps */ 564 if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) { 565 plat->mdio_bus_data->has_xpcs = true; 566 plat->mdio_bus_data->xpcs_an_inband = true; 567 } 568 569 /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ 570 plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR; 571 plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; 572 573 plat->int_snapshot_num = AUX_SNAPSHOT1; 574 plat->ext_snapshot_num = AUX_SNAPSHOT0; 575 576 plat->has_crossts = true; 577 plat->crosststamp = intel_crosststamp; 578 579 /* Setup MSI vector offset specific to Intel mGbE controller */ 580 plat->msi_mac_vec = 29; 581 plat->msi_lpi_vec = 28; 582 plat->msi_sfty_ce_vec = 27; 583 plat->msi_sfty_ue_vec = 26; 584 plat->msi_rx_base_vec = 0; 585 
	plat->msi_tx_base_vec = 1;

	return 0;
}

/* Elkhart Lake shared defaults (8 RX/TX queues, 200MHz PTP clock). */
static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->clk_ptp_rate = 200000000;
	plat->use_phy_wol = 1;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 1;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

/* EHL PCH SGMII: driver-managed SerDes power up/down. */
static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

/* EHL PCH RGMII: no SerDes involved. */
static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};

/* EHL PSE MAC 0: 32-bit DMA addressing, PSE ART scaling. */
static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 2;
	plat->addr64 = 32;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

/* EHL PSE MAC 1: same as PSE 0 but on bus id 3. */
static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 3;
	plat->addr64 = 32;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

/* Tiger Lake shared defaults (6 RX / 4 TX queues, 200MHz PTP clock),
 * also reused by the ADL variants below.
 */
static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 200000000;
	plat->speed_mode_2500 = intel_speed_mode_2500;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
	.setup = tgl_sgmii_phy0_data,
};

static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
	.setup = tgl_sgmii_phy1_data,
};

static int adls_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
	.setup = adls_sgmii_phy0_data,
};

static int adls_sgmii_phy1_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
	.setup = adls_sgmii_phy1_data,
};

/* Board-specific function->PHY-address tables for Quark platforms,
 * selected by the DMI matches in quark_pci_dmi below.
 */
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

/* Quark X1000 setup: resolve the PHY address via DMI, falling back to
 * address 1 on old Galileo firmware without DMI support.
 */
static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as PHY address, so at least the first found MAC
		 * controller would be probed.
879 */ 880 ret = 1; 881 } 882 883 plat->bus_id = pci_dev_id(pdev); 884 plat->phy_addr = ret; 885 plat->phy_interface = PHY_INTERFACE_MODE_RMII; 886 887 plat->dma_cfg->pbl = 16; 888 plat->dma_cfg->pblx8 = true; 889 plat->dma_cfg->fixed_burst = 1; 890 /* AXI (TODO) */ 891 892 return 0; 893 } 894 895 static const struct stmmac_pci_info quark_info = { 896 .setup = quark_default_data, 897 }; 898 899 static int stmmac_config_single_msi(struct pci_dev *pdev, 900 struct plat_stmmacenet_data *plat, 901 struct stmmac_resources *res) 902 { 903 int ret; 904 905 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 906 if (ret < 0) { 907 dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n", 908 __func__); 909 return ret; 910 } 911 912 res->irq = pci_irq_vector(pdev, 0); 913 res->wol_irq = res->irq; 914 plat->multi_msi_en = 0; 915 dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n", 916 __func__); 917 918 return 0; 919 } 920 921 static int stmmac_config_multi_msi(struct pci_dev *pdev, 922 struct plat_stmmacenet_data *plat, 923 struct stmmac_resources *res) 924 { 925 int ret; 926 int i; 927 928 if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX || 929 plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) { 930 dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n", 931 __func__); 932 return -1; 933 } 934 935 ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX, 936 PCI_IRQ_MSI | PCI_IRQ_MSIX); 937 if (ret < 0) { 938 dev_info(&pdev->dev, "%s: multi MSI enablement failed\n", 939 __func__); 940 return ret; 941 } 942 943 /* For RX MSI */ 944 for (i = 0; i < plat->rx_queues_to_use; i++) { 945 res->rx_irq[i] = pci_irq_vector(pdev, 946 plat->msi_rx_base_vec + i * 2); 947 } 948 949 /* For TX MSI */ 950 for (i = 0; i < plat->tx_queues_to_use; i++) { 951 res->tx_irq[i] = pci_irq_vector(pdev, 952 plat->msi_tx_base_vec + i * 2); 953 } 954 955 if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX) 956 res->irq = pci_irq_vector(pdev, plat->msi_mac_vec); 957 if (plat->msi_wol_vec < 
STMMAC_MSI_VEC_MAX) 958 res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec); 959 if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX) 960 res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec); 961 if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX) 962 res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec); 963 if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX) 964 res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec); 965 966 plat->multi_msi_en = 1; 967 dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__); 968 969 return 0; 970 } 971 972 /** 973 * intel_eth_pci_probe 974 * 975 * @pdev: pci device pointer 976 * @id: pointer to table of device id/id's. 977 * 978 * Description: This probing function gets called for all PCI devices which 979 * match the ID table and are not "owned" by other driver yet. This function 980 * gets passed a "struct pci_dev *" for each device whose entry in the ID table 981 * matches the device. The probe functions returns zero when the driver choose 982 * to take "ownership" of the device or an error code(-ve no) otherwise. 
983 */ 984 static int intel_eth_pci_probe(struct pci_dev *pdev, 985 const struct pci_device_id *id) 986 { 987 struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data; 988 struct intel_priv_data *intel_priv; 989 struct plat_stmmacenet_data *plat; 990 struct stmmac_resources res; 991 int ret; 992 993 intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL); 994 if (!intel_priv) 995 return -ENOMEM; 996 997 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); 998 if (!plat) 999 return -ENOMEM; 1000 1001 plat->mdio_bus_data = devm_kzalloc(&pdev->dev, 1002 sizeof(*plat->mdio_bus_data), 1003 GFP_KERNEL); 1004 if (!plat->mdio_bus_data) 1005 return -ENOMEM; 1006 1007 plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), 1008 GFP_KERNEL); 1009 if (!plat->dma_cfg) 1010 return -ENOMEM; 1011 1012 plat->safety_feat_cfg = devm_kzalloc(&pdev->dev, 1013 sizeof(*plat->safety_feat_cfg), 1014 GFP_KERNEL); 1015 if (!plat->safety_feat_cfg) 1016 return -ENOMEM; 1017 1018 /* Enable pci device */ 1019 ret = pcim_enable_device(pdev); 1020 if (ret) { 1021 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", 1022 __func__); 1023 return ret; 1024 } 1025 1026 ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); 1027 if (ret) 1028 return ret; 1029 1030 pci_set_master(pdev); 1031 1032 plat->bsp_priv = intel_priv; 1033 intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR; 1034 intel_priv->crossts_adj = 1; 1035 1036 /* Initialize all MSI vectors to invalid so that it can be set 1037 * according to platform data settings below. 
1038 * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX) 1039 */ 1040 plat->msi_mac_vec = STMMAC_MSI_VEC_MAX; 1041 plat->msi_wol_vec = STMMAC_MSI_VEC_MAX; 1042 plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX; 1043 plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX; 1044 plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX; 1045 plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX; 1046 plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX; 1047 1048 ret = info->setup(pdev, plat); 1049 if (ret) 1050 return ret; 1051 1052 memset(&res, 0, sizeof(res)); 1053 res.addr = pcim_iomap_table(pdev)[0]; 1054 1055 if (plat->eee_usecs_rate > 0) { 1056 u32 tx_lpi_usec; 1057 1058 tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1; 1059 writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); 1060 } 1061 1062 ret = stmmac_config_multi_msi(pdev, plat, &res); 1063 if (ret) { 1064 ret = stmmac_config_single_msi(pdev, plat, &res); 1065 if (ret) { 1066 dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n", 1067 __func__); 1068 goto err_alloc_irq; 1069 } 1070 } 1071 1072 ret = stmmac_dvr_probe(&pdev->dev, plat, &res); 1073 if (ret) { 1074 goto err_dvr_probe; 1075 } 1076 1077 return 0; 1078 1079 err_dvr_probe: 1080 pci_free_irq_vectors(pdev); 1081 err_alloc_irq: 1082 clk_disable_unprepare(plat->stmmac_clk); 1083 clk_unregister_fixed_rate(plat->stmmac_clk); 1084 return ret; 1085 } 1086 1087 /** 1088 * intel_eth_pci_remove 1089 * 1090 * @pdev: pci device pointer 1091 * Description: this function calls the main to free the net resources 1092 * and releases the PCI resources. 
1093 */ 1094 static void intel_eth_pci_remove(struct pci_dev *pdev) 1095 { 1096 struct net_device *ndev = dev_get_drvdata(&pdev->dev); 1097 struct stmmac_priv *priv = netdev_priv(ndev); 1098 1099 stmmac_dvr_remove(&pdev->dev); 1100 1101 clk_unregister_fixed_rate(priv->plat->stmmac_clk); 1102 1103 pcim_iounmap_regions(pdev, BIT(0)); 1104 } 1105 1106 static int __maybe_unused intel_eth_pci_suspend(struct device *dev) 1107 { 1108 struct pci_dev *pdev = to_pci_dev(dev); 1109 int ret; 1110 1111 ret = stmmac_suspend(dev); 1112 if (ret) 1113 return ret; 1114 1115 ret = pci_save_state(pdev); 1116 if (ret) 1117 return ret; 1118 1119 pci_wake_from_d3(pdev, true); 1120 pci_set_power_state(pdev, PCI_D3hot); 1121 return 0; 1122 } 1123 1124 static int __maybe_unused intel_eth_pci_resume(struct device *dev) 1125 { 1126 struct pci_dev *pdev = to_pci_dev(dev); 1127 int ret; 1128 1129 pci_restore_state(pdev); 1130 pci_set_power_state(pdev, PCI_D0); 1131 1132 ret = pcim_enable_device(pdev); 1133 if (ret) 1134 return ret; 1135 1136 pci_set_master(pdev); 1137 1138 return stmmac_resume(dev); 1139 } 1140 1141 static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend, 1142 intel_eth_pci_resume); 1143 1144 #define PCI_DEVICE_ID_INTEL_QUARK 0x0937 1145 #define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30 1146 #define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31 1147 #define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32 1148 /* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC 1149 * which are named PSE0 and PSE1 1150 */ 1151 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0 1152 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1 1153 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2 1154 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0 1155 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1 1156 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2 1157 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac 1158 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2 1159 #define 
PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac 1160 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac 1161 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad 1162 #define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac 1163 1164 static const struct pci_device_id intel_eth_pci_id_table[] = { 1165 { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) }, 1166 { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) }, 1167 { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) }, 1168 { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) }, 1169 { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) }, 1170 { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) }, 1171 { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) }, 1172 { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) }, 1173 { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) }, 1174 { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) }, 1175 { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) }, 1176 { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) }, 1177 { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) }, 1178 { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) }, 1179 { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) }, 1180 { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) }, 1181 {} 1182 }; 1183 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); 1184 1185 static struct pci_driver intel_eth_pci_driver = { 1186 .name = "intel-eth-pci", 1187 .id_table = intel_eth_pci_id_table, 1188 .probe = intel_eth_pci_probe, 1189 .remove = intel_eth_pci_remove, 1190 .driver = { 1191 .pm = &intel_eth_pm_ops, 1192 }, 1193 }; 1194 1195 module_pci_driver(intel_eth_pci_driver); 1196 1197 MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver"); 1198 MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>"); 1199 MODULE_LICENSE("GPL v2"); 1200