// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the driver.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>

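/* Received frames are stored with TSNEP_RX_INLINE_METADATA_SIZE bytes of
 * inline metadata (e.g. the RX timestamp) in front of the Ethernet header;
 * tsnep_rx_poll() pulls this metadata off again before the frame is passed to
 * the stack. The buffer length is rounded up to a multiple of 4 (relied upon
 * by tsnep_rx_activate()) and the reserve keeps the IP header of received
 * frames 4 byte aligned.
 */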
#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
				TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))

static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0) {
		if (adapter->netdev->phydev)
			phy_mac_interrupt(adapter->netdev->phydev);
	}

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		if (adapter->netdev) {
			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
			napi_schedule(&adapter->queue[0].napi);
		}
	}

	return IRQ_HANDLED;
}

static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	u32 mode;

	if (phydev->link) {
		switch (phydev->speed) {
		case SPEED_100:
			mode = ECM_LINK_MODE_100;
			break;
		case SPEED_1000:
			mode = ECM_LINK_MODE_1000;
			break;
		default:
			mode = ECM_LINK_MODE_OFF;
			break;
		}
		iowrite32(mode, adapter->addr + ECM_STATUS);
	}

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps and 1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}

static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties =
			skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
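	/* The owner counter cycles through the values 1 to 3 and is advanced
	 * once per ring traversal. It is written to the descriptor properties
	 * below, so the writeback check in tsnep_tx_poll() can match the
	 * hardware acknowledge against the counter expected by the driver.
	 */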
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

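/* Number of free TX descriptors between the write and the read position. One
 * entry is always kept unused, so that a full ring can be distinguished from
 * an empty one: read == write means empty (TSNEP_RING_SIZE - 1 free) and
 * write == read - 1 means full (0 free).
 */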
static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

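/* Map the skb head and all page fragments for DMA and store the bus addresses
 * in count consecutive descriptors starting at the current write position.
 */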
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);
	}

	return 0;
}

static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			entry->len = 0;
		}
	}
}

static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring is full; this should not happen, because the queue is
		 * stopped below as soon as it could become full with the next
		 * frame
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval != 0) {
		tsnep_tx_unmap(tx, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_queue(tx->adapter->netdev);
	}

	tx->packets++;
	tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}

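/* Reclaim completed TX descriptors: unmap the DMA buffers, deliver hardware
 * TX timestamps for skbs marked with SKBTX_IN_PROGRESS and free the
 * transmitted skbs. Returns false if the local budget was exhausted, i.e.
 * more completed descriptors may be pending.
 */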
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	unsigned long flags;
	int budget = 128;
	struct tsnep_tx_entry *entry;
	int count;

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		tsnep_tx_unmap(tx, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp =
				__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}

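/* Wait until the hardware signals that the TX queue is idle
 * (TSNEP_CONTROL_TX_ENABLE cleared) before the descriptor ring is freed.
 */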
static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (dma_unmap_addr(entry, dma))
			dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
					 dma_unmap_len(entry, len),
					 DMA_FROM_DEVICE);
		if (entry->skb)
			dev_kfree_skb(entry->skb);
	}

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
				      struct tsnep_rx_entry *entry)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
				 GFP_ATOMIC | GFP_DMA);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, RX_SKB_RESERVE);

	dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, dma)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	entry->skb = skb;
	entry->len = RX_SKB_LENGTH;
	dma_unmap_addr_set(entry, dma, dma);
	entry->desc->rx = __cpu_to_le64(dma);

	return 0;
}

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);

		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (retval)
			goto failed;
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* RX_SKB_LENGTH is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

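/* Process received frames of one RX queue within the NAPI budget. A frame is
 * only handed to the stack if a replacement buffer could be allocated and
 * mapped; otherwise the existing buffer is recycled and the frame is counted
 * as dropped. Every processed descriptor is handed back to the hardware
 * immediately.
 */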
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int done = 0;
	struct tsnep_rx_entry *entry;
	struct sk_buff *skb;
	size_t len;
	dma_addr_t dma;
	int length;
	bool enable = false;
	int retval;

	while (likely(done < budget)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		skb = entry->skb;
		len = dma_unmap_len(entry, len);
		dma = dma_unmap_addr(entry, dma);

		/* forward skb only if allocation is successful, otherwise
		 * skb is reused and frame dropped
		 */
		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (!retval) {
			dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);

			length = __le32_to_cpu(entry->desc_wb->properties) &
				 TSNEP_DESC_LENGTH_MASK;
			skb_put(skb, length - ETH_FCS_LEN);
			if (rx->adapter->hwtstamp_config.rx_filter ==
			    HWTSTAMP_FILTER_ALL) {
				struct skb_shared_hwtstamps *hwtstamps =
					skb_hwtstamps(skb);
				struct tsnep_rx_inline *rx_inline =
					(struct tsnep_rx_inline *)skb->data;
				u64 timestamp =
					__le64_to_cpu(rx_inline->timestamp);

				memset(hwtstamps, 0, sizeof(*hwtstamps));
				hwtstamps->hwtstamp = ns_to_ktime(timestamp);
			}
			skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
			skb->protocol = eth_type_trans(skb,
						       rx->adapter->netdev);

			rx->packets++;
			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
			if (skb->pkt_type == PACKET_MULTICAST)
				rx->multicast++;

			napi_gro_receive(napi, skb);
			done++;
		} else {
			rx->dropped++;
		}

		tsnep_rx_activate(rx, rx->read);

		enable = true;

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
	}

	if (enable) {
		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return done;
}

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int i;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	for (i = 0; i < TSNEP_RING_SIZE; i++)
		tsnep_rx_activate(rx, i);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}

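/* NAPI poll: handle TX completions and received frames of the queue and
 * re-enable the queue interrupt only if all work has been completed within
 * the budget.
 */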
static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done)))
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

	return min(done, budget - 1);
}

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	retval = tsnep_phy_open(adapter);
	if (retval)
		return retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll, 64);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

failed:
	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	tsnep_phy_close(adapter);
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	tsnep_phy_close(adapter);

	return 0;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,

	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_setup_tc = tsnep_tc_setup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering; at least the configured MAC address and
	 * broadcasts are never filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	if (np)
		of_node_put(np);

	return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}

static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	adapter->irq = platform_get_irq(pdev, 0);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;
	netdev->irq = adapter->irq;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;

	adapter->num_tx_queues = TSNEP_QUEUES;
	adapter->num_rx_queues = TSNEP_QUEUES;
	adapter->num_queues = TSNEP_QUEUES;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;

	tsnep_disable_irq(adapter, ECM_INT_ALL);
	retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
				  0, TSNEP, adapter);
	if (retval != 0) {
		dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
			adapter->irq);
		return retval;
	}
	tsnep_enable_irq(adapter, ECM_INT_LINK);

	retval = tsnep_mac_init(adapter);
	if (retval)
		goto mac_init_failed;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
mac_init_failed:
	tsnep_disable_irq(adapter, ECM_INT_ALL);
	return retval;
}

static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{},
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = of_match_ptr(tsnep_of_match),
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");