/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
 * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
 * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is
 * invalid and will cause the filtering algorithm to use Multicast
 * promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function validates that a supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1:
	case 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}

/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal registers can be tuned from the device
 * tree by using platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}
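
/*
 * Illustrative device tree fragment (not taken from a real board DTS) that
 * exercises the properties parsed above. Property names match the
 * of_property_read_*() calls in stmmac_axi_setup(); the node name, label
 * and values are arbitrary and only meant as a sketch. The node is
 * referenced from the ethernet node through the snps,axi-config phandle.
 *
 *	stmmac_axi_setup: stmmac-axi-config {
 *		snps,lpi_en;
 *		snps,wr_osr_lmt = <0xf>;
 *		snps,rd_osr_lmt = <0xf>;
 *		snps,blen = <256 128 64 32 0 0 0>;
 *	};
 */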

/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to one RX and one TX queue each.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
	 * to always set this, otherwise the Queue will be classified as AVB
	 * (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Based Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
						 &plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
						 &plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
						 &plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
						 &plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}
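
/*
 * Illustrative device tree fragment for the MTL queue setup parsed above
 * (not copied from a real board DTS; property names come from the
 * of_property_read_*() calls in stmmac_mtl_setup(), values are arbitrary):
 *
 *	mtl_rx_setup: rx-queues-config {
 *		snps,rx-queues-to-use = <2>;
 *		snps,rx-sched-sp;
 *		queue0 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x0>;
 *			snps,priority = <0x0>;
 *		};
 *		queue1 {
 *			snps,dcb-algorithm;
 *			snps,route-ptp;
 *			snps,map-to-dma-channel = <0x1>;
 *		};
 *	};
 *
 *	mtl_tx_setup: tx-queues-config {
 *		snps,tx-queues-to-use = <2>;
 *		snps,tx-sched-wrr;
 *		queue0 {
 *			snps,weight = <0x10>;
 *			snps,dcb-algorithm;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,send_slope = <0x0>;
 *			snps,idle_slope = <0x0>;
 *			snps,high_credit = <0x0>;
 *			snps,low_credit = <0x0>;
 *		};
 *	};
 *
 * The ethernet node then points at these nodes via the snps,mtl-rx-config
 * and snps,mtl-tx-config phandles.
 */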

/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The MDIO bus will be allocated in case a PHY transceiver is on board;
 * it will be NULL if a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus will be
 * allocated in any case (for DSA, MDIO must be registered even with a
 * fixed-link).
 * The table below sums up the supported configurations:
 *	-------------------------------
 *	snps,phy-addr	|     Y
 *	-------------------------------
 *	phy-handle	|     Y
 *	-------------------------------
 *	fixed-link	|     N
 *	-------------------------------
 *	snps,dwmac-mdio	|
 *	  even if	|     Y
 *	fixed-link	|
 *	-------------------------------
 *
 * It returns 0 in case of success, otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	/* If phy-handle property is passed from DT, use it as the PHY */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		dev_dbg(dev, "Found phy-handle subnode\n");

	/* If phy-handle is not specified, check if we have a fixed-phy */
	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
		if ((of_phy_register_fixed_link(np) < 0))
			return -ENODEV;

		dev_dbg(dev, "Found fixed-link subnode\n");
		plat->phy_node = of_node_get(np);
		mdio = false;
	}

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO bus.
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio)
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
	return 0;
}
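
/*
 * Illustrative ethernet node fragment for the PHY lookup above (a made-up
 * example for documentation purposes, not a real board DTS). A child node
 * compatible with "snps,dwmac-mdio" is picked up by stmmac_dt_phy(), and
 * phy-handle selects the PHY on that bus:
 *
 *	&ethernet {
 *		phy-handle = <&phy0>;
 *		mdio0 {
 *			compatible = "snps,dwmac-mdio";
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *			phy0: ethernet-phy@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 */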

/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function is to read the driver parameters from the device tree and
 * set some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* Configure the PHY by using all supported device-tree properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
	    of_device_is_compatible(np, "snps,dwmac-3.50a") ||
	    of_device_is_compatible(np, "snps,dwmac-3.70a") ||
	    of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that the max-frame-size parameter as defined in the
		 * ePAPR v1.1 spec is defined as max-frame-size, it's
		 * actually used as the IEEE definition of MAC Client
		 * data, or MTU. The ePAPR specification is confusing as
		 * the definition is max-frame-size, but usage examples
		 * are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
					       plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
					      plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
	    of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode =
		of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall back to the main clock in case no PTP ref clock is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}
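
/*
 * Putting it together, an illustrative (made-up) ethernet node covering the
 * main properties consumed by stmmac_probe_config_dt(). Values and the unit
 * address are arbitrary. "pclk" and "ptp_ref" are the literal clock names
 * requested above; the CSR clock and the reset line are looked up by
 * STMMAC_RESOURCE_NAME, whose string value is defined elsewhere, so they
 * are omitted here:
 *
 *	ethernet@40028000 {
 *		compatible = "snps,dwmac-3.70a", "snps,dwmac";
 *		reg = <0x40028000 0x8000>;
 *		phy-mode = "rgmii";
 *		max-speed = <1000>;
 *		max-frame-size = <3800>;
 *		snps,multicast-filter-bins = <256>;
 *		snps,perfect-filter-entries = <128>;
 *		snps,pbl = <8>;
 *		snps,fixed-burst;
 *		tx-fifo-depth = <16384>;
 *		rx-fifo-depth = <16384>;
 *		snps,axi-config = <&stmmac_axi_setup>;
 *		snps,mtl-rx-config = <&mtl_rx_setup>;
 *		snps,mtl-tx-config = <&mtl_tx_setup>;
 *	};
 */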

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early so that we can ask for deferred probe
	 * if needed, before we go too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}

	/* On some platforms, e.g. SPEAr, the wake-up IRQ differs from the MAC
	 * IRQ. The external wake-up IRQ can be passed through the platform
	 * code and is named "eth_wake_irq".
	 *
	 * In case the wake-up interrupt is not passed from the platform,
	 * the driver will continue to use the MAC IRQ (ndev->irq).
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
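
/*
 * The IRQ names looked up above map to interrupt-names in the device tree.
 * A minimal, made-up fragment for illustration (only "macirq" is required;
 * "eth_wake_irq" and "eth_lpi" are optional, and the interrupt specifier
 * values are arbitrary):
 *
 *	interrupts = <0 61 4>, <0 62 4>, <0 63 4>;
 *	interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 */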

/**
 * stmmac_pltfr_remove - platform driver removal
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the network
 * resources and then calls the platform exit hook to release the platform
 * resources (e.g. memory).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend - platform driver suspend
 * @dev: device pointer
 * Description: this function is invoked when the driver is suspended; it
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call an exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume - platform driver resume
 * @dev: device pointer
 * Description: this function is invoked when the driver is resumed; before
 * calling the main resume function, on some platforms, it can call its own
 * init helper if required.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
		  stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");