/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *	      Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];

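/* Gather platform resources: the ENET CSR MMIO region, the MAC address
 * (falling back to a random one if none is provided), the
 * phy-connection-type (only RGMII is accepted) and the ENET IRQ.
 */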
static int xge_get_resources(struct xge_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	int phy_mode, ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
						  resource_size(res));
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return phy_mode;
	}
	pdata->resources.phy_mode = phy_mode;

	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->resources.irq = ret;

	return 0;
}

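/* Post @nbuf empty receive buffers at the rx ring tail: allocate and
 * DMA-map an skb for each slot, then hand the slot to hardware by setting
 * the E (empty) bit only after a dma_wmb() has ordered the rest of the
 * descriptor writes.
 */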
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_port_reset(ndev);
	if (ret)
		return ret;

	xge_port_init(ndev);
	pdata->nbufs = NUM_BUFS;

	return 0;
}

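/* Interrupt handler: mask device interrupts and defer all work to NAPI;
 * xge_napi() re-enables interrupts once it completes under budget.
 */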
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = devm_request_irq(dev, pdata->resources.irq, xge_irq,
			       0, pdata->irq_name, pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;

	devm_free_irq(dev, pdata->resources.irq, pdata);
}

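/* A tx slot can be reused by software once hardware has set the E bit
 * and the completion path has stamped PKT_SIZE with SLOT_EMPTY.
 */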
static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
		return true;

	return false;
}

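/* Transmit path: copy the linear skb data into a coherent bounce buffer
 * (the hardware wants 64B-aligned packet buffers), fill in the descriptor,
 * then clear the E bit after a dma_wmb() and kick DMATXCTRL. Returns
 * NETDEV_TX_BUSY with the queue stopped when the tail slot is still owned
 * by hardware.
 */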
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	dma_addr_t dma_addr;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	void *pkt_buf;
	u8 tail;
	u16 len;

	tx_ring = pdata->tx_ring;
	tail = tx_ring->tail;
	len = skb_headlen(skb);
	raw_desc = &tx_ring->raw_desc[tail];

	if (!is_tx_slot_available(raw_desc)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Packet buffers should be 64B aligned */
	pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
				      GFP_ATOMIC);
	if (unlikely(!pkt_buf)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	memcpy(pkt_buf, skb->data, len);

	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
				   SET_BITS(PKT_ADDRH,
					    upper_32_bits(dma_addr)));

	tx_ring->pkt_info[tail].skb = skb;
	tx_ring->pkt_info[tail].dma_addr = dma_addr;
	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

	dma_wmb();

	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	skb_tx_timestamp(skb);
	xge_wr_csr(pdata, DMATXCTRL, 1);

	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

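/* Reclaim completed tx descriptors: free the bounce buffer and skb for
 * each slot the hardware is done with, stamp the slot with SLOT_EMPTY so
 * xge_start_xmit() can reuse it, ack the completion via DMATXSTATUS and
 * wake the queue if it was stopped.
 */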
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}

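/* Receive up to @budget packets: unmap each filled buffer, hand good
 * frames to GRO (dropping frames that carry a descriptor error), refill
 * the slot and ack the packet via DMARXSTATUS. Stops early if a refill
 * fails.
 */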
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}

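/* Free a descriptor ring together with its coherent descriptor memory
 * and pkt_info array; tolerates a partially constructed ring.
 */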
static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u16 size;

	if (!ring)
		return;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	if (ring->desc_addr)
		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
	kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		if (!skb)
			continue;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}

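/* Allocate a descriptor ring: coherent memory for the descriptors plus a
 * pkt_info array tracking the skb and DMA address of every slot,
 * initialized by xge_setup_desc().
 */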
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;
	u16 size;

	ring = kzalloc(sizeof(struct xge_desc_ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
					      GFP_KERNEL);
	if (!ring->desc_addr)
		goto err;

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(struct pkt_info),
				 GFP_KERNEL);
	if (!ring->pkt_info)
		goto err;

	xge_setup_desc(ring);

	return ring;

err:
	xge_delete_desc_ring(ndev, ring);

	return NULL;
}

static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;
err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}

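/* ndo_open: build the tx/rx rings, enable NAPI and the IRQ, start rx DMA
 * and the MAC, then open the transmit queue.
 */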
static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret) {
		napi_disable(&pdata->napi);
		xge_delete_desc_rings(ndev);
		return ret;
	}

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);
	netif_carrier_on(ndev);

	return 0;
}

static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	xge_mac_disable(pdata);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}

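/* NAPI poll: reclaim tx completions first, then receive up to @budget
 * packets; re-arm the device interrupt when the budget is not exhausted.
 */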
static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata = netdev_priv(ndev);
	int processed;

	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	xge_mac_set_station_addr(pdata);

	return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

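/* Drop tx packets that were handed to hardware but never completed (the
 * E bit is still clear), releasing their bounce buffers and skbs.
 */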
static void xge_free_pending_skb(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	int i;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		if (!is_tx_pending(raw_desc))
			continue;

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
	}
}

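/* ndo_tx_timeout: quiesce the interface, flush and drop whatever is
 * stuck on the tx ring, re-initialize the tx descriptors and the MAC,
 * and bring the queue back up.
 */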
static void xge_timeout(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
		xge_intr_disable(pdata);
		napi_disable(&pdata->napi);

		xge_wr_csr(pdata, DMATXCTRL, 0);
		xge_txc_poll(ndev);
		xge_free_pending_skb(ndev);
		xge_wr_csr(pdata, DMATXSTATUS, ~0U);

		xge_setup_desc(pdata->tx_ring);
		xge_update_tx_desc_addr(pdata);
		xge_mac_init(pdata);

		napi_enable(&pdata->napi);
		xge_intr_enable(pdata);
		xge_mac_enable(pdata);
		netif_start_queue(ndev);
		netif_carrier_on(ndev);
	}

	rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};

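/* Probe: allocate the net_device, pick up platform resources, set a
 * 64-bit DMA mask, reset the hardware and register NAPI and the netdev,
 * leaving the carrier off until the device is opened.
 */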
static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xge_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

	netif_carrier_off(ndev);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	return 0;

err:
	free_netdev(ndev);

	return ret;
}

static int xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");