// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the driver.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>

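/* Receive buffer layout, derived from the macros below: the hardware prepends
 * TSNEP_RX_INLINE_METADATA_SIZE bytes of inline metadata to every frame, so
 * the DMA area must hold metadata, Ethernet header, maximum frame and FCS,
 * rounded up to a multiple of 4 (tsnep_rx_activate() relies on this). The
 * reserve places the end of the metadata at offset 16 + NET_IP_ALIGN; after
 * the metadata is pulled and eth_type_trans() consumes the 14 byte Ethernet
 * header, the IP header starts at offset 32 with the typical NET_IP_ALIGN
 * of 2.
 */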
#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
				TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))

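/* Interrupt masking uses a single register: a write to ECM_INT_ENABLE sets
 * the given mask bits; the same write with ECM_INT_DISABLE set requests
 * clearing them instead.
 */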
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0) {
		if (adapter->netdev->phydev)
			phy_mac_interrupt(adapter->netdev->phydev);
	}

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		if (adapter->netdev) {
			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
			napi_schedule(&adapter->queue[0].napi);
		}
	}

	return IRQ_HANDLED;
}

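/* MDIO access: a command word is composed and written to ECM_MD_CONTROL and
 * ECM_MD_STATUS is then busy-polled until the hardware clears ECM_MD_BUSY.
 * Only clause 22 is supported; clause 45 accesses are rejected.
 */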
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	u32 mode;

	if (phydev->link) {
		switch (phydev->speed) {
		case SPEED_100:
			mode = ECM_LINK_MODE_100;
			break;
		case SPEED_1000:
			mode = ECM_LINK_MODE_1000;
			break;
		default:
			mode = ECM_LINK_MODE_OFF;
			break;
		}
		iowrite32(mode, adapter->addr + ECM_STATUS);
	}

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

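/* The descriptor ring consists of TSNEP_RING_PAGE_COUNT DMA coherent pages
 * with TSNEP_RING_ENTRIES_PER_PAGE descriptors each. Every descriptor slot
 * starts with the hardware writeback area (desc_wb), followed at
 * TSNEP_DESC_OFFSET by the part written by the driver (desc). The descriptors
 * are chained into a ring via their next pointers, so only the address of the
 * first descriptor has to be programmed (see tsnep_tx_open()).
 */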
static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}

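/* Descriptor ownership is tracked with an owner counter that cycles through
 * the values 1..3. The hardware copies the counter of the last processed
 * descriptor into the writeback area and a descriptor is considered done when
 * the written-back counter matches the expected value (see tsnep_tx_poll()).
 * increment_owner_counter marks the ring index at which the counter is
 * bumped; it moves back by one slot per wrap-around, so successive laps
 * through the ring tag the same slot with different counter values.
 */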
static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties =
			skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

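/* Classic ring accounting with one slot always left open, so that a full ring
 * can be distinguished from an empty one (read == write). For example, with a
 * ring size of 256, read == write means 255 free descriptors and write one
 * slot behind read means none.
 */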
static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);
	}

	return 0;
}

static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			entry->len = 0;
		}
	}
}

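/* TX path: one descriptor is used for the linear part of the skb and one per
 * page fragment. After DMA mapping, all descriptors are activated and the
 * last one is flagged as end of frame; the dma_wmb() orders the descriptor
 * writes against the doorbell write to TSNEP_CONTROL. The queue is stopped as
 * soon as fewer than MAX_SKB_FRAGS + 1 descriptors are left, so even a
 * maximally fragmented next frame would still fit.
 */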
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, should not happen because the queue is stopped
		 * below as soon as it could run full with the next frame
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval != 0) {
		tsnep_tx_unmap(tx, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_queue(tx->adapter->netdev);
	}

	tx->packets++;
	tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}

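/* TX completion, called from NAPI poll: descriptors are reclaimed until the
 * ring is empty, the writeback owner counter no longer matches (descriptor
 * still owned by hardware) or a local budget of 128 frames is spent. Hardware
 * timestamps are delivered via the extended writeback if they were requested
 * for the skb. The queue is only woken once at least (MAX_SKB_FRAGS + 1) * 2
 * descriptors are free again, which adds some hysteresis against rapid
 * stop/wake cycles.
 */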
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	unsigned long flags;
	int budget = 128;
	struct tsnep_tx_entry *entry;
	int count;

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		tsnep_tx_unmap(tx, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, napi_budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (dma_unmap_addr(entry, dma))
			dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
					 dma_unmap_len(entry, len),
					 DMA_FROM_DEVICE);
		if (entry->skb)
			dev_kfree_skb(entry->skb);
	}

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
				      struct tsnep_rx_entry *entry)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
				 GFP_ATOMIC | GFP_DMA);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, RX_SKB_RESERVE);

	dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dmadev, dma)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	entry->skb = skb;
	entry->len = RX_SKB_LENGTH;
	dma_unmap_addr_set(entry, dma, dma);
	entry->desc->rx = __cpu_to_le64(dma);

	return 0;
}

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);

		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (retval)
			goto failed;
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* RX_SKB_LENGTH is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

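/* RX path: a replacement skb is allocated first and the received frame is
 * only passed up the stack if that allocation succeeds; on failure the old
 * skb stays in the ring and the frame is dropped, so the ring never runs out
 * of buffers. Every processed entry is re-activated immediately and the
 * hardware is notified once per poll via TSNEP_CONTROL.
 */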
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int done = 0;
	struct tsnep_rx_entry *entry;
	struct sk_buff *skb;
	size_t len;
	dma_addr_t dma;
	int length;
	bool enable = false;
	int retval;

	while (likely(done < budget)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		skb = entry->skb;
		len = dma_unmap_len(entry, len);
		dma = dma_unmap_addr(entry, dma);

		/* forward the skb only if allocation of the replacement was
		 * successful; otherwise the skb is reused and the frame is
		 * dropped
		 */
		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
		if (!retval) {
			dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);

			length = __le32_to_cpu(entry->desc_wb->properties) &
				 TSNEP_DESC_LENGTH_MASK;
			skb_put(skb, length - ETH_FCS_LEN);
			if (rx->adapter->hwtstamp_config.rx_filter ==
			    HWTSTAMP_FILTER_ALL) {
				struct skb_shared_hwtstamps *hwtstamps =
					skb_hwtstamps(skb);
				struct tsnep_rx_inline *rx_inline =
					(struct tsnep_rx_inline *)skb->data;

				skb_shinfo(skb)->tx_flags |=
					SKBTX_HW_TSTAMP_NETDEV;
				memset(hwtstamps, 0, sizeof(*hwtstamps));
				hwtstamps->netdev_data = rx_inline;
			}
			skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
			skb->protocol = eth_type_trans(skb,
						       rx->adapter->netdev);

			rx->packets++;
			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
			if (skb->pkt_type == PACKET_MULTICAST)
				rx->multicast++;

			napi_gro_receive(napi, skb);
			done++;
		} else {
			rx->dropped++;
		}

		tsnep_rx_activate(rx, rx->read);

		enable = true;

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
	}

	if (enable) {
		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return done;
}

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int i;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	for (i = 0; i < TSNEP_RING_SIZE; i++)
		tsnep_rx_activate(rx, i);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}

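/* NAPI poll: returning the full budget signals that more work is pending and
 * keeps polling enabled. On completion at most budget - 1 is returned,
 * because napi_complete_done() has already been called and a return value
 * equal to the budget would violate the NAPI contract. The queue interrupt is
 * re-enabled only if napi_complete_done() succeeds.
 */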
static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done)))
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

	return min(done, budget - 1);
}

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	retval = tsnep_phy_open(adapter);
	if (retval)
		return retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll, 64);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

failed:
	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	tsnep_phy_close(adapter);
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	tsnep_phy_close(adapter);

	return 0;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering; at a minimum the configured MAC address
	 * and broadcasts are never filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	of_node_put(np);

	return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}

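/* A minimal, illustrative device tree node for this driver; register
 * addresses, the interrupt specifier and the PHY address are placeholders,
 * the authoritative description is the engleder,tsnep dt-bindings schema:
 *
 *	ethernet@a0000000 {
 *		compatible = "engleder,tsnep";
 *		reg = <0xa0000000 0x10000>;
 *		interrupts = <0 89 4>;
 *		phy-mode = "rgmii";
 *		phy-handle = <&phy0>;
 *		mdio {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *			suppress-preamble;
 *			phy0: ethernet-phy@1 {
 *				reg = <1>;
 *			};
 *		};
 *	};
 */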
static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	adapter->irq = platform_get_irq(pdev, 0);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;
	netdev->irq = adapter->irq;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;

	adapter->num_tx_queues = TSNEP_QUEUES;
	adapter->num_rx_queues = TSNEP_QUEUES;
	adapter->num_queues = TSNEP_QUEUES;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;

	tsnep_disable_irq(adapter, ECM_INT_ALL);
	retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
				  0, TSNEP, adapter);
	if (retval != 0) {
		dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
			adapter->irq);
		return retval;
	}
	tsnep_enable_irq(adapter, ECM_INT_LINK);

	retval = tsnep_mac_init(adapter);
	if (retval)
		goto mac_init_failed;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
mac_init_failed:
	tsnep_disable_irq(adapter, ECM_INT_ALL);
	return retval;
}

static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{},
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = of_match_ptr(tsnep_of_match),
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");