/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refers to the division of the CRC
 * number space: 64 bins correspond to 6 bits of the CRC, 128 bins to
 * 7 bits, and 256 bins to 8 bits of the CRC. Any other setting is
 * invalid and will cause the filtering algorithm to use Multicast
 * promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function checks that a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1 ... 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}

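/* Illustrative only: a minimal sketch of how the filtering properties
 * checked by the two validators above might look in a GMAC device-tree
 * node. The node name, label and values are examples, not requirements;
 * the property names are the ones read in stmmac_probe_config_dt() below.
 *
 *	gmac0: ethernet@e0800000 {
 *		compatible = "st,spear600-gmac";
 *		...
 *		snps,multicast-filter-bins = <256>;
 *		snps,perfect-filter-entries = <128>;
 *	};
 *
 * An out-of-range value (e.g. snps,multicast-filter-bins = <100>) is
 * reported and falls back to 0 bins (Multicast promiscuous mode) or to
 * 1 Unicast entry respectively.
 */
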
/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal register can be tuned through platform
 * parameters read from the device tree.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}

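/* Illustrative only: a sketch of an "snps,axi-config" node carrying the
 * properties parsed by stmmac_axi_setup() above. The label and the values
 * shown are examples (the snps,blen array is assumed to have AXI_BLEN
 * entries), not recommendations.
 *
 *	stmmac_axi_setup: stmmac-axi-config {
 *		snps,wr_osr_lmt = <0xf>;
 *		snps,rd_osr_lmt = <0xf>;
 *		snps,blen = <256 128 64 32 0 0 0>;
 *	};
 *
 * The boolean properties (snps,lpi_en, snps,xit_frm, snps,axi_kbbe,
 * snps,axi_fb, snps,axi_mb, snps,axi_rb) are simply present or absent.
 * The ethernet node references this node with:
 *
 *		snps,axi-config = <&stmmac_axi_setup>;
 */
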
/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to one RX queue and one TX queue.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* The first queue must always be in DCB mode. Since MTL_QUEUE_DCB = 1,
	 * we need to set this explicitly, otherwise the queue will be
	 * classified as AVB (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					&plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit Base Shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
				&plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
				&plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
				&plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
				&plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					&plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}

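/* Illustrative only: a sketch of the MTL multi-queue nodes consumed by
 * stmmac_mtl_setup() above. Node names, labels and values are examples;
 * only the property names come from the parsing code.
 *
 *	mtl_rx_setup: rx-queues-config {
 *		snps,rx-queues-to-use = <2>;
 *		snps,rx-sched-sp;
 *		queue0 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0x0>;
 *			snps,priority = <0x0>;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,map-to-dma-channel = <0x1>;
 *			snps,route-ptp;
 *		};
 *	};
 *
 *	mtl_tx_setup: tx-queues-config {
 *		snps,tx-queues-to-use = <2>;
 *		snps,tx-sched-wrr;
 *		queue0 {
 *			snps,weight = <0x10>;
 *			snps,dcb-algorithm;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,send_slope = <0x1000>;
 *			snps,idle_slope = <0x1000>;
 *			snps,high_credit = <0x3e800>;
 *			snps,low_credit = <0xffc18000>;
 *		};
 *	};
 *
 * The ethernet node then points at them via the snps,mtl-rx-config and
 * snps,mtl-tx-config phandles. Note that the number of child queue nodes
 * must match snps,*-queues-to-use, otherwise the function returns -EINVAL.
 */
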
/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The MDIO bus will be allocated if a PHY transceiver is on board;
 * it will be NULL if only a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus will be
 * allocated in any case (for DSA, MDIO must be registered even with a
 * fixed-link).
 * The table below sums up the supported configurations:
 *	-------------------------------
 *	snps,phy-addr	|     Y
 *	-------------------------------
 *	phy-handle	|     Y
 *	-------------------------------
 *	fixed-link	|     N
 *	-------------------------------
 *	snps,dwmac-mdio	|
 *	  even if	|     Y
 *	fixed-link	|
 *	-------------------------------
 *
 * It returns 0 on success, otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	/* If phy-handle property is passed from DT, use it as the PHY */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		dev_dbg(dev, "Found phy-handle subnode\n");

	/* If phy-handle is not specified, check if we have a fixed-phy */
	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0)
			return -ENODEV;

		dev_dbg(dev, "Found fixed-link subnode\n");
		plat->phy_node = of_node_get(np);
		mdio = false;
	}

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If snps,dwmac-mdio is passed from DT, always register
		 * the MDIO
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio)
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
	return 0;
}

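/* Illustrative only: sketches of the PHY-related device-tree variants that
 * stmmac_dt_phy() above distinguishes. Labels, node names and addresses are
 * examples.
 *
 * 1) PHY on the MAC's own MDIO bus, referenced via phy-handle:
 *
 *	ethernet {
 *		...
 *		phy-handle = <&phy0>;
 *		mdio {
 *			compatible = "snps,dwmac-mdio";
 *			phy0: ethernet-phy@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 *
 * 2) No PHY, link parameters fixed (no MDIO bus allocated):
 *
 *	ethernet {
 *		...
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 *
 * 3) fixed-link plus an "snps,dwmac-mdio" sub-node (e.g. for a DSA switch):
 *    the MDIO bus is registered anyway, as summed up in the table above.
 */
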
/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function reads the driver parameters from the device tree and
 * sets some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* Get clk_csr from device tree */
	of_property_read_u32(np, "clk_csr", &plat->clk_csr);

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* Configure the PHY using all supported device-tree properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
		of_device_is_compatible(np, "snps,dwmac-3.50a") ||
		of_device_is_compatible(np, "snps,dwmac-3.70a") ||
		of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that although the parameter is named max-frame-size
		 * in the ePAPR v1.1 spec, it is actually used as the IEEE
		 * definition of MAC Client data, i.e. the MTU. The ePAPR
		 * specification is confusing because the definition says
		 * max-frame-size, but the usage examples are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
					       plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
					      plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
	    of_device_is_compatible(np, "snps,dwmac-4.20a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
		of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	if (of_device_is_compatible(np, "snps,dwxgmac")) {
		plat->has_xgmac = 1;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall back to the main clock if no PTP ref clock is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

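/* Illustrative only: a sketch of how the clock, reset and configuration
 * phandles consumed by stmmac_probe_config_dt() might be wired up in the
 * device tree. The unit address, compatible string, labels and values are
 * examples; "stmmaceth" is assumed to be the value of STMMAC_RESOURCE_NAME
 * (defined in stmmac.h), while "pclk" and "ptp_ref" are taken literally
 * from the lookups above.
 *
 *	ethernet@40028000 {
 *		compatible = "snps,dwmac-4.10a";
 *		clocks = <&clk_mac>, <&clk_apb>, <&clk_ptp>;
 *		clock-names = "stmmaceth", "pclk", "ptp_ref";
 *		resets = <&rst 5>;
 *		reset-names = "stmmaceth";
 *		phy-mode = "rgmii";
 *		snps,axi-config = <&stmmac_axi_setup>;
 *		snps,mtl-rx-config = <&mtl_rx_setup>;
 *		snps,mtl-tx-config = <&mtl_tx_setup>;
 *		...
 *	};
 *
 * Note that only the "pclk" and reset lookups defer the probe on
 * -EPROBE_DEFER; a missing CSR or "ptp_ref" clock is tolerated with a
 * warning and a fallback.
 */
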
/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early so that we can ask for a deferred probe
	 * if needed, before we go too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}

	/* On some platforms, e.g. SPEAr, the wake up irq differs from the mac
	 * irq. The external wake up irq can be passed through the platform
	 * code under the name "eth_wake_irq".
	 *
	 * If the wake up interrupt is not passed from the platform,
	 * the driver will continue to use the mac irq (ndev->irq).
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);

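/* Illustrative only: the memory and interrupt resources looked up by
 * stmmac_get_platform_resources() typically come from device-tree entries
 * such as the sketch below. The register range and interrupt numbers are
 * examples; only the interrupt-names strings ("macirq", "eth_wake_irq",
 * "eth_lpi") are mandated by the code above.
 *
 *	ethernet@40028000 {
 *		...
 *		reg = <0x40028000 0x8000>;
 *		interrupts = <61>, <62>, <63>;
 *		interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 *	};
 *
 * "macirq" is required; "eth_wake_irq" falls back to the MAC IRQ and
 * "eth_lpi" is optional.
 */
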
/**
 * stmmac_pltfr_remove - platform remove helper
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the net resources,
 * then calls the platform exit hook and releases the remaining resources
 * (e.g. memory).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

678 
679 #ifdef CONFIG_PM_SLEEP
680 /**
681  * stmmac_pltfr_suspend
682  * @dev: device pointer
683  * Description: this function is invoked when suspend the driver and it direcly
684  * call the main suspend function and then, if required, on some platform, it
685  * can call an exit helper.
686  */
687 static int stmmac_pltfr_suspend(struct device *dev)
688 {
689 	int ret;
690 	struct net_device *ndev = dev_get_drvdata(dev);
691 	struct stmmac_priv *priv = netdev_priv(ndev);
692 	struct platform_device *pdev = to_platform_device(dev);
693 
694 	ret = stmmac_suspend(dev);
695 	if (priv->plat->exit)
696 		priv->plat->exit(pdev, priv->plat->bsp_priv);
697 
698 	return ret;
699 }
700 
701 /**
702  * stmmac_pltfr_resume
703  * @dev: device pointer
704  * Description: this function is invoked when resume the driver before calling
705  * the main resume function, on some platforms, it can call own init helper
706  * if required.
707  */
708 static int stmmac_pltfr_resume(struct device *dev)
709 {
710 	struct net_device *ndev = dev_get_drvdata(dev);
711 	struct stmmac_priv *priv = netdev_priv(ndev);
712 	struct platform_device *pdev = to_platform_device(dev);
713 
714 	if (priv->plat->init)
715 		priv->plat->init(pdev, priv->plat->bsp_priv);
716 
717 	return stmmac_resume(dev);
718 }
719 #endif /* CONFIG_PM_SLEEP */
720 
721 SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
722 				       stmmac_pltfr_resume);
723 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
724 
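/* Illustrative only: a minimal sketch (not part of this file) of how a
 * platform "glue" driver typically combines the helpers exported above.
 * The driver name and compatible string are made up for the example.
 *
 *	static int dwmac_foo_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *		if (ret)
 *			stmmac_remove_config_dt(pdev, plat_dat);
 *
 *		return ret;
 *	}
 *
 *	static const struct of_device_id dwmac_foo_match[] = {
 *		{ .compatible = "vendor,foo-dwmac" },
 *		{ }
 *	};
 *
 *	static struct platform_driver dwmac_foo_driver = {
 *		.probe	= dwmac_foo_probe,
 *		.remove	= stmmac_pltfr_remove,
 *		.driver	= {
 *			.name		= "dwmac-foo",
 *			.pm		= &stmmac_pltfr_pm_ops,
 *			.of_match_table	= dwmac_foo_match,
 *		},
 *	};
 *	module_platform_driver(dwmac_foo_driver);
 */
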
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");