/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * This function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of the CRC
 * number space: 64 bins correspond to 6 bits of the CRC, 128 bins to 7 bits,
 * and 256 bins to 8 bits of the CRC. Any other setting is invalid and will
 * cause the filtering algorithm to fall back to Multicast promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
	int x = mcast_bins;

	switch (x) {
	case HASH_TABLE_SIZE:
	case 128:
	case 256:
		break;
	default:
		x = 0;
		pr_info("Hash table entries set to unexpected value %d\n",
			mcast_bins);
		break;
	}
	return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function checks that a supported configuration is selected,
 * and defaults to 1 Unicast address if an unsupported configuration is
 * selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int x = ucast_entries;

	switch (x) {
	case 1 ... 32:
	case 64:
	case 128:
		break;
	default:
		x = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
		break;
	}
	return x;
}

/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * If required, the AXI internal register can be tuned from the device-tree
 * by using platform parameters.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
	struct device_node *np;
	struct stmmac_axi *axi;

	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
	if (!np)
		return NULL;

	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
	if (!axi) {
		of_node_put(np);
		return ERR_PTR(-ENOMEM);
	}

	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
		axi->axi_wr_osr_lmt = 1;
	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
		axi->axi_rd_osr_lmt = 1;
	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
	of_node_put(np);

	return axi;
}
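
/*
 * Illustrative device-tree sketch for the AXI parameters parsed above. Only
 * property names actually read by stmmac_axi_setup() are shown; the node
 * name, label and values are made-up placeholders, not taken from a real
 * board:
 *
 *	stmmac_axi_setup: stmmac-axi-config {
 *		snps,lpi_en;
 *		snps,wr_osr_lmt = <0xf>;
 *		snps,rd_osr_lmt = <0xf>;
 *		snps,blen = <256 128 64 32 0 0 0>;
 *	};
 *
 * and, in the GMAC node, a phandle to it:
 *
 *	snps,axi-config = <&stmmac_axi_setup>;
 */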

/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct device_node *q_node;
	struct device_node *rx_node;
	struct device_node *tx_node;
	u8 queue = 0;
	int ret = 0;

	/* For backwards-compatibility with device trees that don't have any
	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
	 * to a single RX queue and a single TX queue.
	 */
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;

	/* The first queue must always be in DCB mode. Since MTL_QUEUE_DCB = 1
	 * this needs to be set explicitly; otherwise the queue would be
	 * classified as AVB (because MTL_QUEUE_AVB = 0).
	 */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (!rx_node)
		return ret;

	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
	if (!tx_node) {
		of_node_put(rx_node);
		return ret;
	}

	/* Processing RX queues common config */
	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				 &plat->rx_queues_to_use))
		plat->rx_queues_to_use = 1;

	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
	else
		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	/* Processing individual RX queue config */
	for_each_child_of_node(rx_node, q_node) {
		if (queue >= plat->rx_queues_to_use)
			break;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
		else
			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
					 &plat->rx_queues_cfg[queue].chan))
			plat->rx_queues_cfg[queue].chan = queue;
		/* TODO: Dynamic mapping to be included in the future */

		if (of_property_read_u32(q_node, "snps,priority",
					&plat->rx_queues_cfg[queue].prio)) {
			plat->rx_queues_cfg[queue].prio = 0;
			plat->rx_queues_cfg[queue].use_prio = false;
		} else {
			plat->rx_queues_cfg[queue].use_prio = true;
		}

		/* RX queue specific packet type routing */
		if (of_property_read_bool(q_node, "snps,route-avcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
		else if (of_property_read_bool(q_node, "snps,route-ptp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
		else if (of_property_read_bool(q_node, "snps,route-up"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
		else
			plat->rx_queues_cfg[queue].pkt_route = 0x0;

		queue++;
	}
	if (queue != plat->rx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all RX queues were configured\n");
		goto out;
	}

	/* Processing TX queues common config */
	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
				 &plat->tx_queues_to_use))
		plat->tx_queues_to_use = 1;

	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
	else
		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

	queue = 0;

	/* Processing individual TX queue config */
	for_each_child_of_node(tx_node, q_node) {
		if (queue >= plat->tx_queues_to_use)
			break;

		if (of_property_read_u32(q_node, "snps,weight",
					 &plat->tx_queues_cfg[queue].weight))
			plat->tx_queues_cfg[queue].weight = 0x10 + queue;

		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		} else if (of_property_read_bool(q_node,
						 "snps,avb-algorithm")) {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

			/* Credit-based shaper parameters used by AVB */
			if (of_property_read_u32(q_node, "snps,send_slope",
				&plat->tx_queues_cfg[queue].send_slope))
				plat->tx_queues_cfg[queue].send_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,idle_slope",
				&plat->tx_queues_cfg[queue].idle_slope))
				plat->tx_queues_cfg[queue].idle_slope = 0x0;
			if (of_property_read_u32(q_node, "snps,high_credit",
				&plat->tx_queues_cfg[queue].high_credit))
				plat->tx_queues_cfg[queue].high_credit = 0x0;
			if (of_property_read_u32(q_node, "snps,low_credit",
				&plat->tx_queues_cfg[queue].low_credit))
				plat->tx_queues_cfg[queue].low_credit = 0x0;
		} else {
			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
		}

		if (of_property_read_u32(q_node, "snps,priority",
					&plat->tx_queues_cfg[queue].prio)) {
			plat->tx_queues_cfg[queue].prio = 0;
			plat->tx_queues_cfg[queue].use_prio = false;
		} else {
			plat->tx_queues_cfg[queue].use_prio = true;
		}

		queue++;
	}
	if (queue != plat->tx_queues_to_use) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Not all TX queues were configured\n");
		goto out;
	}

out:
	of_node_put(rx_node);
	of_node_put(tx_node);
	of_node_put(q_node);

	return ret;
}
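
/*
 * Illustrative device-tree sketch for the MTL queue configuration parsed
 * above. Only properties actually read by stmmac_mtl_setup() are shown;
 * node names, labels and values are made-up placeholders:
 *
 *	mtl_rx_setup: rx-queues-config {
 *		snps,rx-queues-to-use = <2>;
 *		snps,rx-sched-sp;
 *		queue0 {
 *			snps,dcb-algorithm;
 *			snps,map-to-dma-channel = <0>;
 *			snps,priority = <0>;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,map-to-dma-channel = <1>;
 *			snps,route-ptp;
 *		};
 *	};
 *
 *	mtl_tx_setup: tx-queues-config {
 *		snps,tx-queues-to-use = <2>;
 *		snps,tx-sched-wrr;
 *		queue0 {
 *			snps,weight = <0x10>;
 *			snps,dcb-algorithm;
 *		};
 *		queue1 {
 *			snps,avb-algorithm;
 *			snps,send_slope = <0x1000>;
 *			snps,idle_slope = <0x1000>;
 *			snps,high_credit = <0x3E800>;
 *			snps,low_credit = <0xFFC18000>;
 *		};
 *	};
 *
 * and, in the GMAC node, phandles to them:
 *
 *	snps,mtl-rx-config = <&mtl_rx_setup>;
 *	snps,mtl-tx-config = <&mtl_tx_setup>;
 */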

/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The MDIO bus will be allocated in case a PHY transceiver is on board;
 * it will be NULL if only a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus will be
 * allocated in any case (for DSA, MDIO must be registered even with a
 * fixed-link).
 * The table below summarizes the supported configurations:
 *	-------------------------------
 *	snps,phy-addr	|     Y
 *	-------------------------------
 *	phy-handle	|     Y
 *	-------------------------------
 *	fixed-link	|     N
 *	-------------------------------
 *	snps,dwmac-mdio	|
 *	  even if	|     Y
 *	fixed-link	|
 *	-------------------------------
 *
 * It returns 0 in case of success, otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
			 struct device_node *np, struct device *dev)
{
	bool mdio = true;
	static const struct of_device_id need_mdio_ids[] = {
		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
		{},
	};

	/* If phy-handle property is passed from DT, use it as the PHY */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		dev_dbg(dev, "Found phy-handle subnode\n");

	/* If phy-handle is not specified, check if we have a fixed-phy */
	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
		if ((of_phy_register_fixed_link(np) < 0))
			return -ENODEV;

		dev_dbg(dev, "Found fixed-link subnode\n");
		plat->phy_node = of_node_get(np);
		mdio = false;
	}

	if (of_match_node(need_mdio_ids, np)) {
		plat->mdio_node = of_get_child_by_name(np, "mdio");
	} else {
		/* If a "snps,dwmac-mdio" sub-node is found in DT, always
		 * register the MDIO bus.
		 */
		for_each_child_of_node(np, plat->mdio_node) {
			if (of_device_is_compatible(plat->mdio_node,
						    "snps,dwmac-mdio"))
				break;
		}
	}

	if (plat->mdio_node) {
		dev_dbg(dev, "Found MDIO subnode\n");
		mdio = true;
	}

	if (mdio)
		plat->mdio_bus_data =
			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
				     GFP_KERNEL);
	return 0;
}
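
/*
 * Illustrative device-tree sketch for the PHY/MDIO configurations handled
 * above; node names, labels and the PHY address are made-up placeholders.
 * Either a PHY on an MDIO bus, referenced via phy-handle:
 *
 *	phy-handle = <&phy0>;
 *	mdio {
 *		compatible = "snps,dwmac-mdio";
 *		phy0: ethernet-phy@0 {
 *			reg = <0>;
 *		};
 *	};
 *
 * or a fixed-link sub-node (in which case the MDIO bus is skipped unless a
 * "snps,dwmac-mdio" sub-node is also present):
 *
 *	fixed-link {
 *		speed = <1000>;
 *		full-duplex;
 *	};
 */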

/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * This function reads the driver parameters from the device-tree and sets
 * the private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	struct device_node *np = pdev->dev.of_node;
	struct plat_stmmacenet_data *plat;
	struct stmmac_dma_cfg *dma_cfg;
	int rc;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	*mac = of_get_mac_address(np);
	plat->interface = of_get_phy_mode(np);

	/* Get max speed of operation from device tree */
	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
		plat->max_speed = -1;

	plat->bus_id = of_alias_get_id(np, "ethernet");
	if (plat->bus_id < 0)
		plat->bus_id = 0;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
	 * and warn of its use. Remove this when phy node support is added.
	 */
	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	/* Configure the PHY by using all supported device-tree properties */
	rc = stmmac_dt_phy(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");

	plat->en_tx_lpi_clockgating =
		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

	/* Set the maxmtu to a default of JUMBO_LEN in case the
	 * parameter is not present in the device tree.
	 */
	plat->maxmtu = JUMBO_LEN;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/*
	 * Currently only the properties needed on SPEAr600
	 * are provided. All other properties should be added
	 * once needed on other platforms.
	 */
	if (of_device_is_compatible(np, "st,spear600-gmac") ||
		of_device_is_compatible(np, "snps,dwmac-3.50a") ||
		of_device_is_compatible(np, "snps,dwmac-3.70a") ||
		of_device_is_compatible(np, "snps,dwmac")) {
		/* Note that the ePAPR v1.1 spec defines this property as
		 * max-frame-size, but it is actually used as the MTU, i.e.
		 * the IEEE definition of MAC client data. The ePAPR
		 * specification is confusing because the definition says
		 * max-frame-size while the usage examples are clearly MTUs.
		 */
		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
		of_property_read_u32(np, "snps,multicast-filter-bins",
				     &plat->multicast_filter_bins);
		of_property_read_u32(np, "snps,perfect-filter-entries",
				     &plat->unicast_filter_entries);
		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
					       plat->unicast_filter_entries);
		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
					      plat->multicast_filter_bins);
		plat->has_gmac = 1;
		plat->pmt = 1;
	}

	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
	    of_device_is_compatible(np, "snps,dwmac-4.20a")) {
		plat->has_gmac4 = 1;
		plat->has_gmac = 0;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
		of_device_is_compatible(np, "snps,dwmac-3.710")) {
		plat->enh_desc = 1;
		plat->bugged_jumbo = 1;
		plat->force_sf_dma_mode = 1;
	}

	if (of_device_is_compatible(np, "snps,dwxgmac")) {
		plat->has_xgmac = 1;
		plat->pmt = 1;
		plat->tso_en = of_property_read_bool(np, "snps,tso");
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
			       GFP_KERNEL);
	if (!dma_cfg) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(-ENOMEM);
	}
	plat->dma_cfg = dma_cfg;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	if (!dma_cfg->pbl)
		dma_cfg->pbl = DEFAULT_DMA_PBL;
	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
	}

	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

	plat->axi = stmmac_axi_setup(pdev);

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
	}

	/* clock setup */
	plat->stmmac_clk = devm_clk_get(&pdev->dev,
					STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
		plat->stmmac_clk = NULL;
	}
	clk_prepare_enable(plat->stmmac_clk);

	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(plat->pclk)) {
		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
			goto error_pclk_get;

		plat->pclk = NULL;
	}
	clk_prepare_enable(plat->pclk);

	/* Fall back to the main clock in case no PTP ref clock is passed */
	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(plat->clk_ptp_ref)) {
		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
		plat->clk_ptp_ref = NULL;
		dev_warn(&pdev->dev, "PTP uses main clock\n");
	} else {
		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
	}

	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(plat->stmmac_rst)) {
		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
			goto error_hw_init;

		dev_info(&pdev->dev, "no reset control found\n");
		plat->stmmac_rst = NULL;
	}

	return plat;

error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);

	return ERR_PTR(-EPROBE_DEFER);
}

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
				  struct stmmac_resources *stmmac_res)
{
	struct resource *res;

	memset(stmmac_res, 0, sizeof(*stmmac_res));

	/* Get IRQ information early so that we can ask for a deferred probe,
	 * if needed, before we go too far with resource allocation.
	 */
	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
	if (stmmac_res->irq < 0) {
		if (stmmac_res->irq != -EPROBE_DEFER) {
			dev_err(&pdev->dev,
				"MAC IRQ configuration information not found\n");
		}
		return stmmac_res->irq;
	}

	/* On some platforms, e.g. SPEAr, the wake-up IRQ differs from the MAC
	 * IRQ. The external wake-up IRQ can be passed from the platform code
	 * under the name "eth_wake_irq".
	 *
	 * In case the wake-up interrupt is not passed from the platform,
	 * the driver will continue to use the MAC IRQ (ndev->irq).
	 */
	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (stmmac_res->wol_irq < 0) {
		if (stmmac_res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		stmmac_res->wol_irq = stmmac_res->irq;
	}

	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

	return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
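
/*
 * Illustrative sketch (an assumption, not code from this file) of how a
 * dwmac glue driver's probe is expected to combine the helpers exported
 * here with stmmac_dvr_probe(), the main driver entry point:
 *
 *	struct plat_stmmacenet_data *plat;
 *	struct stmmac_resources res;
 *	int ret;
 *
 *	ret = stmmac_get_platform_resources(pdev, &res);
 *	if (ret)
 *		return ret;
 *
 *	plat = stmmac_probe_config_dt(pdev, &res.mac);
 *	if (IS_ERR(plat))
 *		return PTR_ERR(plat);
 *
 *	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
 *	if (ret)
 *		stmmac_remove_config_dt(pdev, plat);
 *
 *	return ret;
 */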

/**
 * stmmac_pltfr_remove - platform driver removal
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the net resources
 * and then calls the platform exit hook to release the platform resources
 * (e.g. memory).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct plat_stmmacenet_data *plat = priv->plat;
	int ret = stmmac_dvr_remove(&pdev->dev);

	if (plat->exit)
		plat->exit(pdev, plat->bsp_priv);

	stmmac_remove_config_dt(pdev, plat);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend - suspend callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is suspended; it
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call an exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
	int ret;
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	ret = stmmac_suspend(dev);
	if (priv->plat->exit)
		priv->plat->exit(pdev, priv->plat->bsp_priv);

	return ret;
}

/**
 * stmmac_pltfr_resume - resume callback
 * @dev: device pointer
 * Description: this function is invoked when the driver is resumed; before
 * calling the main resume function it can, if required on some platforms,
 * call its own init helper.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct platform_device *pdev = to_platform_device(dev);

	if (priv->plat->init)
		priv->plat->init(pdev, priv->plat->bsp_priv);

	return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
				       stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
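
/*
 * Illustrative sketch (an assumption, not code from this file) of how a glue
 * driver is expected to hook up the exported PM ops and remove helper; the
 * driver name, probe callback and match table are made-up placeholders:
 *
 *	static struct platform_driver foo_dwmac_driver = {
 *		.probe  = foo_dwmac_probe,
 *		.remove = stmmac_pltfr_remove,
 *		.driver = {
 *			.name           = "foo-dwmac",
 *			.pm             = &stmmac_pltfr_pm_ops,
 *			.of_match_table = foo_dwmac_match,
 *		},
 *	};
 */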

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");