// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "Bins" refers to the division of the CRC
 * number space: 64 bins correspond to 6 bits of the CRC, 128 bins to 7 bits,
 * and 256 bins to 8 bits of the CRC. Any other setting is invalid and will
 * cause the filtering algorithm to use Multicast promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function checks that a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1 ... 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}

/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal register can be tuned from the device tree
 * by using platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}

/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to one RX and one TX queue each.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
	 * to always set this, otherwise Queue will be classified as AVB
	 * (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

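		/* TX queue weight used by the weighted scheduling algorithms */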
		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Based Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
						 &plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
						 &plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
						 &plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
						 &plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}

/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The MDIO bus will be allocated if a PHY transceiver is on board;
 * it will be NULL if only a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus will be
 * allocated in any case (for DSA, MDIO must be registered even with a
 * fixed-link).
 * The table below sums up the supported configurations:
 *	-------------------------------
 *	snps,phy-addr	|     Y
 *	-------------------------------
 *	phy-handle	|     Y
 *	-------------------------------
 *	fixed-link	|     N
 *	-------------------------------
 *	snps,dwmac-mdio	|
 *	even if		|     Y
 *	fixed-link	|
 *	-------------------------------
 *
 * It returns 0 in case of success, otherwise a negative errno.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO bus.
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio) {
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
		if (!plat->mdio_bus_data)
			return -ENOMEM;
	}

	return 0;
}

/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function reads the driver parameters from the device tree and
 * sets the private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Some wrapper drivers still rely on phy_node. Let's save it while
	 * they are not converted to phylink.
	 */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* PHYLINK automatically parses the phy-handle property */
	plat->phylink_node = np;

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* Default is to get clk_csr from stmmac_clk_csr_set(), unless it is
	 * overridden from the device tree.
	 */
	plat->clk_csr = -1;
	of_property_read_u32(np, "clk_csr", &plat->clk_csr);

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* Configure the PHY using all supported device-tree properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
	    of_device_is_compatible(np, "snps,dwmac-3.50a") ||
	    of_device_is_compatible(np, "snps,dwmac-3.70a") ||
	    of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that although ePAPR v1.1 names this parameter
		 * "max-frame-size", it is actually used as the MTU (the
		 * IEEE definition of MAC client data). The ePAPR spec is
		 * confusing here: the property is called max-frame-size,
		 * but its usage examples are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
				plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
				plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
	    of_device_is_compatible(np, "snps,dwmac-4.20a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
	    of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	if (of_device_is_compatible(np, "snps,dwxgmac")) {
		plat->has_xgmac = 1;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
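		/* A missing CSR clock is not fatal: continue with a NULL
		 * clock, which the clk API accepts as a no-op.
		 */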
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall back to the main clock if no PTP ref clock is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early so we can ask for deferred probe,
	 * if needed, before we go too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}

	/* On some platforms, e.g. SPEAr, the wake-up IRQ differs from the MAC
	 * IRQ. The external wake-up IRQ can be passed through the platform
	 * code as the resource named "eth_wake_irq".
	 *
	 * If the wake-up interrupt is not passed from the platform code,
	 * the driver will continue to use the MAC IRQ (ndev->irq).
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);

/**
 * stmmac_pltfr_remove - platform driver removal
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the network
 * resources and then invokes the platform exit hook to release its
 * resources (e.g. mem).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend - platform driver suspend callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is suspended; it
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call an exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume - platform driver resume callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is resumed; on some
 * platforms it can call a platform-specific init helper, if required, before
 * calling the main resume function.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
		  stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
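
/*
 * Usage note (illustrative sketch, not part of this driver): a glue/wrapper
 * platform driver typically combines the helpers exported above roughly as
 * shown below, assuming a DT-only platform. Names such as foo_dwmac_probe
 * and "vendor,foo-dwmac" are hypothetical placeholders; the exact bindings
 * and callbacks depend on the platform. See dwmac-generic.c for a real user
 * of these helpers.
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *		if (ret)
 *			stmmac_remove_config_dt(pdev, plat_dat);
 *
 *		return ret;
 *	}
 *
 *	static const struct of_device_id foo_dwmac_match[] = {
 *		{ .compatible = "vendor,foo-dwmac" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_dwmac_match);
 *
 *	static struct platform_driver foo_dwmac_driver = {
 *		.probe  = foo_dwmac_probe,
 *		.remove = stmmac_pltfr_remove,
 *		.driver = {
 *			.name           = "foo-dwmac",
 *			.pm             = &stmmac_pltfr_pm_ops,
 *			.of_match_table = foo_dwmac_match,
 *		},
 *	};
 *	module_platform_driver(foo_dwmac_driver);
 */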