/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>

#include "xgbe.h"
#include "xgbe-common.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);

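/* Network interface message level: the default of -1 causes
 * netif_msg_init() to fall back to default_msg_level below.
 */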
static int debug = -1;
module_param(debug, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

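/* Default SerDes settings, indexed by speed (1GbE, 2.5GbE, 10GbE).  These
 * values are used whenever the firmware (DT/ACPI) does not supply an
 * override for the corresponding property.
 */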
static const u32 xgbe_serdes_blwc[] = {
	XGBE_SPEED_1000_BLWC,
	XGBE_SPEED_2500_BLWC,
	XGBE_SPEED_10000_BLWC,
};

static const u32 xgbe_serdes_cdr_rate[] = {
	XGBE_SPEED_1000_CDR,
	XGBE_SPEED_2500_CDR,
	XGBE_SPEED_10000_CDR,
};

static const u32 xgbe_serdes_pq_skew[] = {
	XGBE_SPEED_1000_PQ,
	XGBE_SPEED_2500_PQ,
	XGBE_SPEED_10000_PQ,
};

static const u32 xgbe_serdes_tx_amp[] = {
	XGBE_SPEED_1000_TXAMP,
	XGBE_SPEED_2500_TXAMP,
	XGBE_SPEED_10000_TXAMP,
};

static const u32 xgbe_serdes_dfe_tap_cfg[] = {
	XGBE_SPEED_1000_DFE_TAP_CONFIG,
	XGBE_SPEED_2500_DFE_TAP_CONFIG,
	XGBE_SPEED_10000_DFE_TAP_CONFIG,
};

static const u32 xgbe_serdes_dfe_tap_ena[] = {
	XGBE_SPEED_1000_DFE_TAP_ENABLE,
	XGBE_SPEED_2500_DFE_TAP_ENABLE,
	XGBE_SPEED_10000_DFE_TAP_ENABLE,
};

static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_16;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_16;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}

static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);
}

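/* On ACPI systems the DMA and PTP clock rates are described as device
 * properties (typically via _DSD), while device tree systems provide
 * actual clock references that are queried through the clk API.
 */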
#ifdef CONFIG_ACPI
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;
	u32 property;
	int ret;

	/* Obtain the system clock setting */
	ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
	if (ret) {
		dev_err(dev, "unable to obtain %s property\n",
			XGBE_ACPI_DMA_FREQ);
		return ret;
	}
	pdata->sysclk_rate = property;

	/* Obtain the PTP clock setting */
	ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
	if (ret) {
		dev_err(dev, "unable to obtain %s property\n",
			XGBE_ACPI_PTP_FREQ);
		return ret;
	}
	pdata->ptpclk_rate = property;

	return 0;
}
#else   /* CONFIG_ACPI */
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}
#endif  /* CONFIG_ACPI */

#ifdef CONFIG_OF
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;

	/* Obtain the system clock setting */
	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
	if (IS_ERR(pdata->sysclk)) {
		dev_err(dev, "dma devm_clk_get failed\n");
		return PTR_ERR(pdata->sysclk);
	}
	pdata->sysclk_rate = clk_get_rate(pdata->sysclk);

	/* Obtain the PTP clock setting */
	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
	if (IS_ERR(pdata->ptpclk)) {
		dev_err(dev, "ptp devm_clk_get failed\n");
		return PTR_ERR(pdata->ptpclk);
	}
	pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);

	return 0;
}

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;
	struct device_node *phy_node;
	struct platform_device *phy_pdev;

	phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (phy_node) {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);
	} else {
		/* New style device tree:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		get_device(dev);
		phy_pdev = pdata->pdev;
	}

	return phy_pdev;
}
#else   /* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	return NULL;
}
#endif  /* CONFIG_OF */

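/* Count the platform device resources of a given type; used to locate the
 * PHY memory and interrupt resources when they are grouped together with
 * the XGBE resources on a single platform device.
 */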
static unsigned int xgbe_resource_count(struct platform_device *pdev,
					unsigned int type)
{
	unsigned int count;
	int i;

	for (i = 0, count = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];

		if (type == resource_type(res))
			count++;
	}

	return count;
}

static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	struct platform_device *phy_pdev;

	if (pdata->use_acpi) {
		get_device(pdata->dev);
		phy_pdev = pdata->pdev;
	} else {
		phy_pdev = xgbe_of_get_phy_pdev(pdata);
	}

	return phy_pdev;
}

static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;
	struct device *dev = &pdev->dev, *phy_dev;
	struct platform_device *phy_pdev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i, phy_memnum, phy_irqnum;
	enum dev_dma_attr attr;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = dev->of_node ? 0 : 1;

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
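		/* The last three MEM resources are the PHY/SerDes register
		 * blocks (rxtx, sir0, sir1) and the last IRQ is the PHY
		 * auto-negotiation interrupt
		 */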
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
	} else {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
	}

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

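	/* The remaining MMIO regions (SerDes Rx/Tx and SerDes integration
	 * register blocks) come from the PHY platform device, starting at
	 * index phy_memnum
	 */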
	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs  = %p\n", pdata->rxtx_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir0_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs  = %p\n", pdata->sir0_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir1_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Retrieve the PHY speedset */
	ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
				       &pdata->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		goto err_io;
	}

	switch (pdata->speed_set) {
	case XGBE_SPEEDSET_1000_10000:
	case XGBE_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY configuration properties */
	if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_BLWC_PROPERTY,
						     pdata->serdes_blwc,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_BLWC_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
		       sizeof(pdata->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_CDR_RATE_PROPERTY,
						     pdata->serdes_cdr_rate,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_CDR_RATE_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
		       sizeof(pdata->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PQ_SKEW_PROPERTY,
						     pdata->serdes_pq_skew,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PQ_SKEW_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
		       sizeof(pdata->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_TX_AMP_PROPERTY,
						     pdata->serdes_tx_amp,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_TX_AMP_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
		       sizeof(pdata->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_CFG_PROPERTY,
						     pdata->serdes_dfe_tap_cfg,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_CFG_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
		       sizeof(pdata->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_ENA_PROPERTY,
						     pdata->serdes_dfe_tap_ena,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_ENA_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
		       sizeof(pdata->serdes_dfe_tap_ena));
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	attr = device_get_dma_attr(dev);
	if (attr == DEV_DMA_NOT_SUPPORTED) {
		dev_err(dev, "DMA is not supported\n");
		ret = -ENODEV;
		goto err_io;
	}
	pdata->coherent = (attr == DEV_DMA_COHERENT);
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq phy 0 failed\n");
		goto err_io;
	}
	pdata->an_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Issue software reset to device */
	pdata->hw_if.exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

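	/* Spread the indirection table entries round-robin across the
	 * enabled Rx DMA channels
	 */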
	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

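	/* Enable hashing on the IP 2-tuple and the TCP/UDP 4-tuples */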
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->phy_if.phy_init(pdata);

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_io;
	}

	/* Create the PHY/ANEG name based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_netdev;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	platform_device_put(phy_pdev);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_wq:
	destroy_workqueue(pdata->dev_workqueue);

err_netdev:
	unregister_netdev(netdev);

err_io:
	platform_device_put(phy_pdev);

err_phydev:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}

static int xgbe_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_remove\n");

	xgbe_debugfs_exit(pdata);

	xgbe_ptp_unregister(pdata);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	unregister_netdev(netdev);

	free_netdev(netdev);

	DBGPR("<--xgbe_remove\n");

	return 0;
}

#ifdef CONFIG_PM
static int xgbe_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret = 0;

	DBGPR("-->xgbe_suspend\n");

	if (netif_running(netdev))
		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);

	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	DBGPR("<--xgbe_suspend\n");

	return ret;
}

static int xgbe_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret = 0;

	DBGPR("-->xgbe_resume\n");

	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	if (netif_running(netdev))
		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

	DBGPR("<--xgbe_resume\n");

	return ret;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
	{ "AMDI8001", 0 },
	{},
};

MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
	{ .compatible = "amd,xgbe-seattle-v1a", },
	{},
};

MODULE_DEVICE_TABLE(of, xgbe_of_match);
#endif

static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);

static struct platform_driver xgbe_driver = {
	.driver = {
		.name = "amd-xgbe",
#ifdef CONFIG_ACPI
		.acpi_match_table = xgbe_acpi_match,
#endif
#ifdef CONFIG_OF
		.of_match_table = xgbe_of_match,
#endif
		.pm = &xgbe_pm_ops,
	},
	.probe = xgbe_probe,
	.remove = xgbe_remove,
};

module_platform_driver(xgbe_driver);