/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validate the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refers to the division of the CRC
 * number space: 64 bins correspond to 6 bits of the CRC, 128 bins to 7 bits,
 * and 256 bins to 8 bits of the CRC. Any other setting is invalid and will
 * cause the filtering algorithm to use Multicast promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function checks that a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1:
	case 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}
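/* Both validators act on optional DT properties that are read later in
 * stmmac_probe_config_dt(); an illustrative fragment (the values are
 * examples only):
 *
 *	snps,multicast-filter-bins = <256>;
 *	snps,perfect-filter-entries = <1>;
 */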
/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if the "snps,axi-config" phandle is present in the device tree, the AXI
 * internal register can be tuned through these platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}
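/* An illustrative "snps,axi-config" node as consumed above. The property
 * names match the reads in stmmac_axi_setup(); the values are examples
 * only, not recommendations:
 *
 *	gmac_axi_cfg: stmmac-axi-config {
 *		snps,wr_osr_lmt = <0xf>;
 *		snps,rd_osr_lmt = <0xf>;
 *		snps,blen = <256 128 64 32 0 0 0>;
 *	};
 *
 * and, in the MAC node:
 *
 *	snps,axi-config = <&gmac_axi_cfg>;
 */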
/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static void stmmac_mtl_setup(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to one RX queue and one TX queue.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* The first queue must always be in DCB mode. Since MTL_QUEUE_DCB = 1
	 * we always need to set it explicitly, otherwise the queue would be
	 * classified as AVB (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;
		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Based Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
						 &plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
						 &plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
						 &plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
						 &plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}

	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);
}
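/* An illustrative pair of MTL configuration nodes matching the properties
 * parsed above. Queue counts, child node names and all values are examples
 * only:
 *
 *	mtl_rx_setup: rx-queues-config {
 *		snps,rx-queues-to-use = <2>;
 *		snps,rx-sched-sp;
 *		queue0 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x0>;
 *			snps,priority = <0x0>;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,map-to-dma-channel = <0x1>;
 *			snps,route-ptp;
 *		};
 *	};
 *
 *	mtl_tx_setup: tx-queues-config {
 *		snps,tx-queues-to-use = <2>;
 *		snps,tx-sched-wrr;
 *		queue0 {
 *			snps,weight = <0x10>;
 *			snps,dcb-algorithm;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,send_slope = <0x1000>;
 *			snps,idle_slope = <0x1000>;
 *			snps,high_credit = <0x3e800>;
 *			snps,low_credit = <0xffc18000>;
 *		};
 *	};
 *
 * The MAC node then points at them with:
 *
 *	snps,mtl-rx-config = <&mtl_rx_setup>;
 *	snps,mtl-tx-config = <&mtl_tx_setup>;
 */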
/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The MDIO bus will be allocated in case a PHY transceiver is on board;
 * it will be NULL if only a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus will be
 * allocated in any case (for DSA, MDIO must be registered even with a
 * fixed-link).
 * The table below sums up the supported configurations:
 * -------------------------------
 * snps,phy-addr	|	Y
 * -------------------------------
 * phy-handle		|	Y
 * -------------------------------
 * fixed-link		|	N
 * -------------------------------
 * snps,dwmac-mdio	|
 *   even if		|	Y
 *   fixed-link		|
 * -------------------------------
 *
 * It returns 0 in case of success, otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	/* If a phy-handle property is passed from DT, use it as the PHY */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		dev_dbg(dev, "Found phy-handle subnode\n");

	/* If phy-handle is not specified, check if we have a fixed-phy */
	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
		if ((of_phy_register_fixed_link(np) < 0))
			return -ENODEV;

		dev_dbg(dev, "Found fixed-link subnode\n");
		plat->phy_node = of_node_get(np);
		mdio = false;
	}

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO bus.
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio)
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
	return 0;
}

/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function reads the driver parameters from the device tree and
 * sets some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* Configure the PHY using all supported device-tree properties */
	if (stmmac_dt_phy(plat, np, &pdev->dev))
		return ERR_PTR(-ENODEV);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
	    of_device_is_compatible(np, "snps,dwmac-3.50a") ||
	    of_device_is_compatible(np, "snps,dwmac-3.70a") ||
	    of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that the max-frame-size parameter from the ePAPR v1.1
		 * spec is actually used here as the IEEE definition of MAC
		 * Client data, i.e. the MTU. The ePAPR specification is
		 * confusing: it defines max-frame-size, but its usage
		 * examples are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
					       plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
					      plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
	    of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode =
		of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	stmmac_mtl_setup(pdev, plat);

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);
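	/* The clocks requested above and below map to a DT description
	 * roughly like the following (illustrative; the phandles are
	 * placeholders and "stmmaceth" assumes the usual definition of
	 * STMMAC_RESOURCE_NAME):
	 *
	 *	clocks = <&clk_csr>, <&clk_apb>, <&clk_ptp>;
	 *	clock-names = "stmmaceth", "pclk", "ptp_ref";
	 *
	 * A missing CSR clock only triggers a warning, and a missing
	 * "ptp_ref" makes PTP fall back to the CSR clock rate.
	 */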
	/* Fall back to the main clock in case no PTP ref clock is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early to have an ability to ask for deferred
	 * probe if needed before we went too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}
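	/* An illustrative interrupt description for the names requested by
	 * this function ("macirq" above, "eth_wake_irq" and "eth_lpi" below);
	 * only "macirq" is required, the other two are optional:
	 *
	 *	interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
	 */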
	/* On some platforms, e.g. SPEAr, the wake-up IRQ differs from the MAC
	 * IRQ. The external wake-up IRQ can be passed by the platform code,
	 * named "eth_wake_irq".
	 *
	 * In case the wake-up interrupt is not passed from the platform,
	 * the driver will continue to use the MAC IRQ (ndev->irq).
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);

/**
 * stmmac_pltfr_remove - platform driver remove callback
 * @pdev: platform device pointer
 * Description: this function calls the main remove routine to free the
 * network resources and then calls the platform exit hook to release the
 * platform resources (e.g. memory).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend - platform driver suspend callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is suspended. It
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call an exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume - platform driver resume callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is resumed. Before
 * calling the main resume function it can, on some platforms, call an own
 * init helper if required.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
		  stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");