// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"

struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes etc. */
};

/* This struct is used to associate a PCI Function of a MAC controller on a
 * board, discovered via DMI, with the address of the PHY connected to the
 * MAC. A negative address means the MAC controller is not connected to a PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

/* Poll a SerDes register over MDIO until the masked value matches the
 * expected value, or time out after a fixed number of retries.
 */
static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}

static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	return 0;
}

static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}

static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	int ret;
	int i;

	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->tso_en = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;
	plat->eee_usecs_rate = plat->clk_ptp_rate;

	/* Set system clock */
	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   "stmmac-clk", NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->vlan_fail_q_en = true;

	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	return 0;
}

static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->clk_ptp_rate = 200000000;

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};

static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 3;
	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 200000000;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_info = {
	.setup = tgl_sgmii_data,
};

static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020, which
	 * has only one PCI network device, while other asset tags are
	 * for IOT2040, which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as PHY address, so at least the first found MAC
		 * controller would be probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};

/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to the table of device IDs
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = 0x15;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];
	res.wol_irq = pci_irq_vector(pdev, 0);
	res.irq = pci_irq_vector(pdev, 0);

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret) {
		pci_free_irq_vectors(pdev);
		clk_disable_unprepare(plat->stmmac_clk);
		clk_unregister_fixed_rate(plat->stmmac_clk);
	}

	return ret;
}

/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 * Description: this function calls the main driver remove function to free
 * the net resources and then releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	pci_free_irq_vectors(pdev);

	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

	pcim_iounmap_regions(pdev, BIT(0));

	pci_disable_device(pdev);
}

static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK_ID			0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID		0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs
 * which are named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID		0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID		0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID		0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID		0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID		0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID		0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID		0xa0ac

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver = {
		.pm = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");