/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refers to the division of the CRC
 * number space. 64 bins correspond to 6 bits of the CRC, 128 bins to 7 bits,
 * and 256 bins to 8 bits of the CRC. Any other setting is invalid and will
 * cause the filtering algorithm to fall back to Multicast promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function checks that a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1:
	case 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}

/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal register can be tuned from the device tree
 * by using platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}
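
/*
 * Illustrative device-tree fragment for the "snps,axi-config" phandle parsed
 * above (a sketch assembled only from the properties read by
 * stmmac_axi_setup(); the node/label names and values are made-up examples,
 * not taken from any specific board DTS):
 *
 *	stmmac_axi_setup: stmmac-axi-config {
 *		snps,lpi_en;
 *		snps,wr_osr_lmt = <0xf>;
 *		snps,rd_osr_lmt = <0xf>;
 *		snps,blen = <256 128 64 32 0 0 0>;
 *	};
 *
 * and, in the GMAC node:
 *
 *	snps,axi-config = <&stmmac_axi_setup>;
 */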

/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to a single RX and TX queue each.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
	 * to always set this, otherwise the Queue will be classified as AVB
	 * (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Based Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
				&plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
				&plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
				&plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
				&plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					 &plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}
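
/*
 * Illustrative device-tree fragment for the MTL queue configuration parsed
 * above (a sketch built only from the properties read by stmmac_mtl_setup();
 * labels, node names and values are made-up examples rather than a real
 * board DTS):
 *
 *	snps,mtl-rx-config = <&mtl_rx_setup>;
 *	snps,mtl-tx-config = <&mtl_tx_setup>;
 *
 *	mtl_rx_setup: rx-queues-config {
 *		snps,rx-queues-to-use = <1>;
 *		snps,rx-sched-sp;
 *		queue0 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x0>;
 *			snps,priority = <0x0>;
 *		};
 *	};
 *
 *	mtl_tx_setup: tx-queues-config {
 *		snps,tx-queues-to-use = <2>;
 *		snps,tx-sched-wrr;
 *		queue0 {
 *			snps,weight = <0x10>;
 *			snps,dcb-algorithm;
 *			snps,priority = <0x0>;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,send_slope = <0x1000>;
 *			snps,idle_slope = <0x1000>;
 *			snps,high_credit = <0x3e800>;
 *			snps,low_credit = <0xffc18000>;
 *			snps,priority = <0x1>;
 *		};
 *	};
 */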

/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The mdio bus will be allocated in case a phy transceiver is on board;
 * it will be NULL if a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the mdio bus will be
 * allocated in any case (for DSA, mdio must be registered even with a
 * fixed-link).
 * The table below sums up whether the mdio bus gets registered for each
 * supported configuration:
 *	-------------------------------
 *	snps,phy-addr	|     Y
 *	-------------------------------
 *	phy-handle	|     Y
 *	-------------------------------
 *	fixed-link	|     N
 *	-------------------------------
 *	snps,dwmac-mdio	|
 *	  even if	|     Y
 *	fixed-link	|
 *	-------------------------------
 *
 * It returns 0 in case of success otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	/* If phy-handle property is passed from DT, use it as the PHY */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		dev_dbg(dev, "Found phy-handle subnode\n");

	/* If phy-handle is not specified, check if we have a fixed-phy */
	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
		if ((of_phy_register_fixed_link(np) < 0))
			return -ENODEV;

		dev_dbg(dev, "Found fixed-link subnode\n");
		plat->phy_node = of_node_get(np);
		mdio = false;
	}

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO bus
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio)
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
	return 0;
}
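
/*
 * Illustrative device-tree fragment for the fixed-link plus "snps,dwmac-mdio"
 * case handled above (a sketch only; the node contents are made-up examples
 * and the MDIO child nodes depend on the board):
 *
 *	fixed-link {
 *		speed = <1000>;
 *		full-duplex;
 *	};
 *
 *	mdio {
 *		compatible = "snps,dwmac-mdio";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */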

/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function is to read the driver parameters from the device tree and
 * set some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* Configure the PHY using all supported device-tree properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
		of_device_is_compatible(np, "snps,dwmac-3.50a") ||
		of_device_is_compatible(np, "snps,dwmac-3.70a") ||
		of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that the max-frame-size property defined in the
		 * ePAPR v1.1 spec is actually used as the IEEE definition
		 * of MAC Client data, i.e. the MTU. The ePAPR specification
		 * is confusing here: the property is named max-frame-size,
		 * but its usage examples are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
					       plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
					      plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
	    of_device_is_compatible(np, "snps,dwmac-4.20a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
		of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	if (of_device_is_compatible(np, "snps,dwxgmac")) {
		plat->has_xgmac = 1;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall back to the main clock if no PTP ref clock is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get the IRQ information early so that we can ask for a deferred
	 * probe, if needed, before going too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}

	/* On some platforms, e.g. SPEAr, the wake-up irq differs from the mac
	 * irq. The external wake-up irq can be passed by the platform code
	 * under the name "eth_wake_irq".
	 *
	 * If the wake-up interrupt is not passed by the platform, the driver
	 * will continue to use the mac irq (ndev->irq).
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
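
/*
 * Minimal sketch of how a platform glue driver typically combines the
 * helpers above with stmmac_dvr_probe(), loosely following the generic
 * dwmac platform driver. The "foo_dwmac_probe" name is made up for the
 * example, and a real glue driver usually adds its own setup in between:
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *		if (ret)
 *			stmmac_remove_config_dt(pdev, plat_dat);
 *
 *		return ret;
 *	}
 */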

/**
 * stmmac_pltfr_remove
 * @pdev: platform device pointer
 * Description: this function calls the main driver function to free the net
 * resources, then calls the platform's exit hook and releases its resources
 * (e.g. memory).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend
 * @dev: device pointer
 * Description: this function is invoked when the driver is suspended; it
 * directly calls the main suspend function and then, if required on some
 * platforms, calls an exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume
 * @dev: device pointer
 * Description: this function is invoked when the driver is resumed; before
 * calling the main resume function it can, on some platforms, call its own
 * init helper if required.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
				       stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");