/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
 * number space. 64 bins correspond to 6 bits of the CRC, 128 bins to
 * 7 bits, and 256 bins to 8 bits of the CRC. Any other setting is
 * invalid and will cause the filtering algorithm to use Multicast
 * promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function validates that a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1:
	case 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}

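/*
 * Illustrative device-tree fragment for the values checked by the two
 * helpers above. The properties are read in stmmac_probe_config_dt()
 * further below; the numbers here are examples only:
 *
 *	&ethernet {
 *		snps,multicast-filter-bins = <256>;
 *		snps,perfect-filter-entries = <128>;
 *	};
 */
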
/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal register can be tuned from the device
 * tree by using platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}

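/*
 * Minimal illustrative fragment for the "snps,axi-config" phandle parsed
 * above; node names and values are examples only (snps,blen takes AXI_BLEN
 * entries):
 *
 *	stmmac_axi_conf: stmmac-axi-config {
 *		snps,wr_osr_lmt = <0xf>;
 *		snps,rd_osr_lmt = <0xf>;
 *		snps,blen = <256 128 64 32 0 0 0>;
 *	};
 *
 *	&ethernet {
 *		snps,axi-config = <&stmmac_axi_conf>;
 *	};
 */
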
/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to one RX queue and one TX queue.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* The first queue must always be in DCB mode. As MTL_QUEUE_DCB = 1
	 * we need to always set this, otherwise the queue will be classified
	 * as AVB (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Based Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
						 &plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
						 &plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
						 &plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
						 &plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}

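/*
 * Minimal illustrative device-tree fragment for the queue configuration
 * parsed by stmmac_mtl_setup() above. Node names and values are examples
 * only; all property names are the ones read above:
 *
 *	mtl_rx_setup: rx-queues-config {
 *		snps,rx-queues-to-use = <2>;
 *		snps,rx-sched-sp;
 *		queue0 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x0>;
 *			snps,priority = <0x0>;
 *		};
 *		queue1 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x1>;
 *			snps,route-ptp;
 *		};
 *	};
 *
 *	mtl_tx_setup: tx-queues-config {
 *		snps,tx-queues-to-use = <2>;
 *		snps,tx-sched-wrr;
 *		queue0 {
 *			snps,weight = <0x10>;
 *			snps,dcb-algorithm;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,send_slope = <0x1000>;
 *			snps,idle_slope = <0x1000>;
 *			snps,high_credit = <0x3E800>;
 *			snps,low_credit = <0xFFC18000>;
 *		};
 *	};
 *
 *	&ethernet {
 *		snps,mtl-rx-config = <&mtl_rx_setup>;
 *		snps,mtl-tx-config = <&mtl_tx_setup>;
 *	};
 */
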
/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The MDIO bus will be allocated in case a PHY transceiver is on board;
 * it will be NULL if a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus will be
 * allocated in any case (for DSA, the MDIO must be registered even with a
 * fixed-link).
 * The table below sums up the supported configurations:
 *	-------------------------------
 *	snps,phy-addr	|     Y
 *	-------------------------------
 *	phy-handle	|     Y
 *	-------------------------------
 *	fixed-link	|     N
 *	-------------------------------
 *	snps,dwmac-mdio	|
 *	  even if	|     Y
 *	fixed-link	|
 *	-------------------------------
 *
 * It returns 0 in case of success otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	/* If phy-handle property is passed from DT, use it as the PHY */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		dev_dbg(dev, "Found phy-handle subnode\n");

	/* If phy-handle is not specified, check if we have a fixed-phy */
	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
		if ((of_phy_register_fixed_link(np) < 0))
			return -ENODEV;

		dev_dbg(dev, "Found fixed-link subnode\n");
		plat->phy_node = of_node_get(np);
		mdio = false;
	}

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio)
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
	return 0;
}

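/*
 * Illustrative fragments for the configurations summarized in the table
 * above; node names are examples only:
 *
 * a) PHY on the MDIO bus, referenced via phy-handle:
 *	&ethernet {
 *		phy-handle = <&phy0>;
 *		mdio {
 *			compatible = "snps,dwmac-mdio";
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *			phy0: ethernet-phy@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 *
 * b) fixed-link (no MDIO bus is registered unless a "snps,dwmac-mdio"
 *    sub-node is also present, e.g. for DSA):
 *	&ethernet {
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */
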
/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function reads the driver parameters from the device tree and
 * sets some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* Configure the PHY by using all the supported device-tree properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
	    of_device_is_compatible(np, "snps,dwmac-3.50a") ||
	    of_device_is_compatible(np, "snps,dwmac-3.70a") ||
	    of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that although the ePAPR v1.1 spec defines this
		 * property as "max-frame-size", it is actually used as the
		 * MTU (the IEEE definition of MAC client data). The ePAPR
		 * definition is confusing, but its usage examples are
		 * clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
					       plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
					      plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
	    of_device_is_compatible(np, "snps,dwmac-4.20a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
	    of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode =
		of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall back to the main clock in case no PTP ref clock is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

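/*
 * Illustrative clock and reset wiring matched by name in
 * stmmac_probe_config_dt() above. The CSR clock and the reset line are
 * looked up by STMMAC_RESOURCE_NAME (assumed here to be "stmmaceth");
 * phandles and the extra clocks are examples only:
 *
 *	&ethernet {
 *		clocks = <&clk_csr>, <&clk_apb>, <&clk_ptp>;
 *		clock-names = "stmmaceth", "pclk", "ptp_ref";
 *		resets = <&rst 1>;
 *		reset-names = "stmmaceth";
 *	};
 */
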
/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

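/*
 * Illustrative interrupt wiring for the named IRQs requested by
 * stmmac_get_platform_resources() below; "eth_wake_irq" and "eth_lpi"
 * are optional, and the actual interrupt specifiers are platform
 * specific (omitted here):
 *
 *	&ethernet {
 *		interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 *	};
 */
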
int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early to have an ability to ask for deferred
	 * probe if needed before we went too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}

	/* On some platforms, e.g. SPEAr, the wake-up IRQ differs from the
	 * MAC IRQ. An external wake-up IRQ can be passed through the
	 * platform code under the name "eth_wake_irq".
	 *
	 * In case the wake-up interrupt is not passed from the platform,
	 * the driver will continue to use the MAC IRQ (ndev->irq).
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);

/**
 * stmmac_pltfr_remove - platform driver remove callback
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the network
 * resources and then calls the platform exit hook to release the remaining
 * resources (e.g. memory).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend - platform suspend callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is suspended. It
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call a platform exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume - platform resume callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is resumed. Before
 * calling the main resume function it can, on some platforms, call a
 * platform init helper if required.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
		  stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");