// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 */

/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
#include "emac-sgmii.h"

#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |  \
		NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define EMAC_RRD_SIZE					     4
/* The RRD size if timestamping is enabled: */
#define EMAC_TS_RRD_SIZE				     6
#define EMAC_TPD_SIZE					     4
#define EMAC_RFD_SIZE					     2

#define REG_MAC_RX_STATUS_BIN		EMAC_RXMAC_STATC_REG0
#define REG_MAC_RX_STATUS_END		EMAC_RXMAC_STATC_REG22
#define REG_MAC_TX_STATUS_BIN		EMAC_TXMAC_STATC_REG0
#define REG_MAC_TX_STATUS_END		EMAC_TXMAC_STATC_REG24

#define RXQ0_NUM_RFD_PREF_DEF				     8
#define TXQ0_NUM_TPD_PREF_DEF				     5

#define EMAC_PREAMBLE_DEF				     7

#define DMAR_DLY_CNT_DEF				    15
#define DMAW_DLY_CNT_DEF				     4

#define IMR_NORMAL_MASK		(ISR_ERROR | ISR_OVER | ISR_TX_PKT)

#define ISR_TX_PKT      (\
	TX_PKT_INT      |\
	TX_PKT_INT1     |\
	TX_PKT_INT2     |\
	TX_PKT_INT3)

#define ISR_OVER        (\
	RFD0_UR_INT     |\
	RFD1_UR_INT     |\
	RFD2_UR_INT     |\
	RFD3_UR_INT     |\
	RXF_OF_INT      |\
	RFD4_UR_INT     |\
	TXF_UR_INT)

#define ISR_ERROR       (\
	DMAR_TO_INT     |\
	DMAW_TO_INT     |\
	TXQ_TO_INT)
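
/* IMR_NORMAL_MASK plus the receive-queue bit (RX_PKT_INT0, added to
 * irq->mask in emac_probe()) forms the interrupt mask that is written to
 * EMAC_INT_MASK at runtime.
 */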

/* in sync with enum emac_clk_id */
static const char * const emac_clk_name[] = {
	"axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
	"rx_clk", "sys_clk"
};

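/* Read-modify-write helper: clear the bits in @mask, then set @val (which is
 * expected to already be shifted into position within @mask). The sequence is
 * not atomic, so callers provide any serialization they need.
 */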
void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
{
	u32 data = readl(addr);

	writel(((data & ~mask) | val), addr);
}

/* Reinitialize the whole data path: take the MAC down, reset the SGMII and
 * bring the MAC back up, all under adpt->reset_lock.
 */
int emac_reinit_locked(struct emac_adapter *adpt)
{
	int ret;

	mutex_lock(&adpt->reset_lock);

	emac_mac_down(adpt);
	emac_sgmii_reset(adpt);
	ret = emac_mac_up(adpt);

	mutex_unlock(&adpt->reset_lock);

	return ret;
}

/* NAPI poll: process received packets and, once less than the full budget is
 * consumed, complete NAPI and re-enable the RX interrupt.
 */
static int emac_napi_rtx(struct napi_struct *napi, int budget)
{
	struct emac_rx_queue *rx_q =
		container_of(napi, struct emac_rx_queue, napi);
	struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
	struct emac_irq *irq = rx_q->irq;
	int work_done = 0;

	emac_mac_rx_process(adpt, rx_q, &work_done, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		irq->mask |= rx_q->intr;
		writel(irq->mask, adpt->base + EMAC_INT_MASK);
	}

	return work_done;
}

/* Transmit the packet */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
}

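/* Core interrupt handler: mask all EMAC interrupts, read the interrupt
 * status, dispatch error handling, RX NAPI scheduling and TX completion,
 * then re-enable interrupts with the (possibly updated) mask.
 */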
static irqreturn_t emac_isr(int _irq, void *data)
{
	struct emac_irq *irq = data;
	struct emac_adapter *adpt =
		container_of(irq, struct emac_adapter, irq);
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	u32 isr, status;

	/* disable the interrupt */
	writel(0, adpt->base + EMAC_INT_MASK);

	isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);

	status = isr & irq->mask;
	if (status == 0)
		goto exit;

	if (status & ISR_ERROR) {
		net_err_ratelimited("%s: error interrupt 0x%lx\n",
				    adpt->netdev->name, status & ISR_ERROR);
		/* reset MAC */
		schedule_work(&adpt->work_thread);
	}

	/* Schedule NAPI for the receive queue whose interrupt status bit
	 * is set
	 */
	if (status & rx_q->intr) {
		if (napi_schedule_prep(&rx_q->napi)) {
			irq->mask &= ~rx_q->intr;
			__napi_schedule(&rx_q->napi);
		}
	}

	if (status & TX_PKT_INT)
		emac_mac_tx_process(adpt, &adpt->tx_q);

	if (status & ISR_OVER)
		net_warn_ratelimited("%s: TX/RX overflow interrupt\n",
				     adpt->netdev->name);

exit:
	/* enable the interrupt */
	writel(irq->mask, adpt->base + EMAC_INT_MASK);

	return IRQ_HANDLED;
}

/* Configure VLAN tag strip/insert feature */
static int emac_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct emac_adapter *adpt = netdev_priv(netdev);

	/* We only need to reprogram the hardware if the VLAN tag features
	 * have changed, and if it's already running.
	 */
	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	if (!netif_running(netdev))
		return 0;

	/* emac_mac_mode_config() uses netdev->features to configure the EMAC,
	 * so make sure it's set first.
	 */
	netdev->features = features;

	return emac_reinit_locked(adpt);
}

/* Configure Multicast and Promiscuous modes */
static void emac_rx_mode_set(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	emac_mac_mode_config(adpt);

	/* update multicast address filtering */
	emac_mac_multicast_addr_clear(adpt);
	netdev_for_each_mc_addr(ha, netdev)
		emac_mac_multicast_addr_set(adpt, ha->addr);
}

/* Change the Maximum Transmission Unit (MTU) */
static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	netif_dbg(adpt, hw, adpt->netdev,
		  "changing MTU from %d to %d\n", netdev->mtu,
		  new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		return emac_reinit_locked(adpt);

	return 0;
}

/* Called when the network interface is made active */
static int emac_open(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_irq	*irq = &adpt->irq;
	int ret;

	ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
	if (ret) {
		netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
		return ret;
	}

	/* allocate rx/tx dma buffer & descriptors */
	ret = emac_mac_rx_tx_rings_alloc_all(adpt);
	if (ret) {
		netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
		free_irq(irq->irq, irq);
		return ret;
	}

	ret = emac_sgmii_open(adpt);
	if (ret) {
		emac_mac_rx_tx_rings_free_all(adpt);
		free_irq(irq->irq, irq);
		return ret;
	}

	ret = emac_mac_up(adpt);
	if (ret) {
		emac_mac_rx_tx_rings_free_all(adpt);
		free_irq(irq->irq, irq);
		emac_sgmii_close(adpt);
		return ret;
	}

	return 0;
}

/* Called when the network interface is disabled */
static int emac_close(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	mutex_lock(&adpt->reset_lock);

	emac_sgmii_close(adpt);
	emac_mac_down(adpt);
	emac_mac_rx_tx_rings_free_all(adpt);

	free_irq(adpt->irq.irq, &adpt->irq);

	mutex_unlock(&adpt->reset_lock);

	return 0;
}

/* Respond to a TX hang */
static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	schedule_work(&adpt->work_thread);
}


/**
 * emac_update_hw_stats - read the EMAC stat registers
 * @adpt: EMAC adapter private data
 *
 * Reads the stats registers and writes the values to adpt->stats.
 *
 * adpt->stats.lock must be held while calling this function,
 * and while reading from adpt->stats.
 */
void emac_update_hw_stats(struct emac_adapter *adpt)
{
	struct emac_stats *stats = &adpt->stats;
	u64 *stats_itr = &adpt->stats.rx_ok;
	void __iomem *base = adpt->base;
	unsigned int addr;

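	/* The RX status bins are contiguous registers laid out in the same
	 * order as the leading u64 counters in struct emac_stats, so the
	 * register address and the counter pointer can be advanced in
	 * lockstep. The same layout assumption applies to the TX bins below.
	 */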
	addr = REG_MAC_RX_STATUS_BIN;
	while (addr <= REG_MAC_RX_STATUS_END) {
		*stats_itr += readl_relaxed(base + addr);
		stats_itr++;
		addr += sizeof(u32);
	}

	/* additional rx status */
	stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23);
	stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24);

	/* update tx status */
	addr = REG_MAC_TX_STATUS_BIN;
	stats_itr = &stats->tx_ok;

	while (addr <= REG_MAC_TX_STATUS_END) {
		*stats_itr += readl_relaxed(base + addr);
		stats_itr++;
		addr += sizeof(u32);
	}

	/* additional tx status */
	stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25);
}

/* Provide network statistics info for the interface */
static void emac_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *net_stats)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_stats *stats = &adpt->stats;

	spin_lock(&stats->lock);

	emac_update_hw_stats(adpt);

	/* return parsed statistics */
	net_stats->rx_packets = stats->rx_ok;
	net_stats->tx_packets = stats->tx_ok;
	net_stats->rx_bytes = stats->rx_byte_cnt;
	net_stats->tx_bytes = stats->tx_byte_cnt;
	net_stats->multicast = stats->rx_mcast;
	net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
				stats->tx_late_col + stats->tx_abort_col;

	net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
			       stats->rx_len_err + stats->rx_sz_ov +
			       stats->rx_align_err;
	net_stats->rx_fifo_errors = stats->rx_rxf_ov;
	net_stats->rx_length_errors = stats->rx_len_err;
	net_stats->rx_crc_errors = stats->rx_fcs_err;
	net_stats->rx_frame_errors = stats->rx_align_err;
	net_stats->rx_over_errors = stats->rx_rxf_ov;
	net_stats->rx_missed_errors = stats->rx_rxf_ov;

	net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
			       stats->tx_underrun + stats->tx_trunc;
	net_stats->tx_fifo_errors = stats->tx_underrun;
	net_stats->tx_aborted_errors = stats->tx_abort_col;
	net_stats->tx_window_errors = stats->tx_late_col;

	spin_unlock(&stats->lock);
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= emac_change_mtu,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_get_stats64	= emac_get_stats64,
	.ndo_set_features       = emac_set_features,
	.ndo_set_rx_mode        = emac_rx_mode_set,
};

/* Watchdog task routine, called to reinitialize the EMAC */
static void emac_work_thread(struct work_struct *work)
{
	struct emac_adapter *adpt =
		container_of(work, struct emac_adapter, work_thread);

	emac_reinit_locked(adpt);
}

/* Initialize various data structures */
static void emac_init_adapter(struct emac_adapter *adpt)
{
	u32 reg;

	adpt->rrd_size = EMAC_RRD_SIZE;
	adpt->tpd_size = EMAC_TPD_SIZE;
	adpt->rfd_size = EMAC_RFD_SIZE;

	/* descriptors */
	adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
	adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;

	/* dma */
	adpt->dma_order = emac_dma_ord_out;
	adpt->dmar_block = emac_dma_req_4096;
	adpt->dmaw_block = emac_dma_req_128;
	adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
	adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
	adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
	adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;

	/* irq moderation: RX default in the MODERATOR2 field, TX default
	 * in the MODERATOR field of the moderator register value
	 */
	reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
	      ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
	adpt->irq_mod = reg;

	/* others */
	adpt->preamble = EMAC_PREAMBLE_DEF;

	/* default to automatic flow control */
	adpt->automatic = true;

	/* Disable single-pause-frame mode by default */
	adpt->single_pause_mode = false;
}

/* Get the clocks */
static int emac_clks_get(struct platform_device *pdev,
			 struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++) {
		struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);

		if (IS_ERR(clk)) {
			dev_err(&pdev->dev,
				"could not claim clock %s (error=%li)\n",
				emac_clk_name[i], PTR_ERR(clk));

			return PTR_ERR(clk);
		}

		adpt->clk[i] = clk;
	}

	return 0;
}

/* Initialize clocks (phase 1): claim all clocks, enable the AXI and AHB
 * clocks, and start the high-speed clock at its initial rate.
 */
static int emac_clks_phase1_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	/* On ACPI platforms, clocks are controlled by firmware and/or
	 * ACPI, not by drivers.
	 */
	if (has_acpi_companion(&pdev->dev))
		return 0;

	ret = emac_clks_get(pdev, adpt);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
	if (ret)
		return ret;

	return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
}

/* Enable the remaining clocks (phase 2); emac_clks_phase1_init() must have
 * been called first.
 */
static int emac_clks_phase2_init(struct platform_device *pdev,
				 struct emac_adapter *adpt)
{
	int ret;

	if (has_acpi_companion(&pdev->dev))
		return 0;

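	/* 125 MHz is the GMII/SGMII data-path rate required for gigabit
	 * operation; the MDIO block clock runs at 25 MHz.
	 */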
	ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
	if (ret)
		return ret;

	ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
	if (ret)
		return ret;

	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
	if (ret)
		return ret;

	return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
}

static void emac_clks_teardown(struct emac_adapter *adpt)
{
	unsigned int i;

	for (i = 0; i < EMAC_CLK_CNT; i++)
		clk_disable_unprepare(adpt->clk[i]);
}

/* Get the platform resources: MAC address, IRQ and register regions */
static int emac_probe_resources(struct platform_device *pdev,
				struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	char maddr[ETH_ALEN];
	int ret = 0;

	/* get mac address */
	if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
		ether_addr_copy(netdev->dev_addr, maddr);
	else
		eth_hw_addr_random(netdev);

	/* Core 0 interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	adpt->irq.irq = ret;

	/* base register address */
	adpt->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adpt->base))
		return PTR_ERR(adpt->base);

	/* CSR register address */
	adpt->csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(adpt->csr))
		return PTR_ERR(adpt->csr);

	netdev->base_addr = (unsigned long)adpt->base;

	return 0;
}

static const struct of_device_id emac_dt_match[] = {
	{
		.compatible = "qcom,fsm9900-emac",
	},
	{}
};
MODULE_DEVICE_TABLE(of, emac_dt_match);

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
	{
		.id = "QCOM8070",
	},
	{}
};
MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
#endif

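/* Probe: map device resources, bring the clocks up in two phases, configure
 * the external PHY and the internal SGMII, then register the net_device.
 * Each error label unwinds only the steps completed before the failure.
 */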
static int emac_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct emac_adapter *adpt;
	struct emac_sgmii *phy;
	u16 devid, revid;
	u32 reg;
	int ret;

	/* The TPD buffer address is limited to:
	 * 1. PTP:	45bits. (Driver doesn't support yet.)
	 * 2. NON-PTP:	46bits.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
	if (ret) {
		dev_err(&pdev->dev, "could not set DMA mask\n");
		return ret;
	}

	netdev = alloc_etherdev(sizeof(struct emac_adapter));
	if (!netdev)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	emac_set_ethtool_ops(netdev);

	adpt = netdev_priv(netdev);
	adpt->netdev = netdev;
	adpt->msg_enable = EMAC_MSG_DEFAULT;

	phy = &adpt->phy;
	atomic_set(&phy->decode_error_count, 0);

	mutex_init(&adpt->reset_lock);
	spin_lock_init(&adpt->stats.lock);

	adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;

	ret = emac_probe_resources(pdev, adpt);
	if (ret)
		goto err_undo_netdev;

	/* initialize clocks */
	ret = emac_clks_phase1_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not initialize clocks\n");
		goto err_undo_netdev;
	}

	netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
	netdev->irq = adpt->irq.irq;

	netdev->netdev_ops = &emac_netdev_ops;

	emac_init_adapter(adpt);

	/* init external phy */
	ret = emac_phy_config(pdev, adpt);
	if (ret)
		goto err_undo_clocks;

	/* init internal sgmii phy */
	ret = emac_sgmii_config(pdev, adpt);
	if (ret)
		goto err_undo_mdiobus;

	/* enable clocks */
	ret = emac_clks_phase2_init(pdev, adpt);
	if (ret) {
		dev_err(&pdev->dev, "could not enable clocks\n");
		goto err_undo_mdiobus;
	}

	/* set hw features */
	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features = netdev->features;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
				 NETIF_F_TSO | NETIF_F_TSO6;

	/* MTU range: 46 - 9194 */
	netdev->min_mtu = EMAC_MIN_ETH_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	netdev->max_mtu = EMAC_MAX_ETH_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	INIT_WORK(&adpt->work_thread, emac_work_thread);

	/* Initialize queues */
	emac_mac_rx_tx_ring_init_all(pdev, adpt);

	netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
		       NAPI_POLL_WEIGHT);

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(&pdev->dev, "could not register net device\n");
		goto err_undo_napi;
	}

	reg =  readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
	devid = (reg & DEV_ID_NUM_BMSK)  >> DEV_ID_NUM_SHFT;
	revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
	reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);

	netif_info(adpt, probe, netdev,
		   "hardware id %d.%d, hardware version %d.%d.%d\n",
		   devid, revid,
		   (reg & MAJOR_BMSK) >> MAJOR_SHFT,
		   (reg & MINOR_BMSK) >> MINOR_SHFT,
		   (reg & STEP_BMSK)  >> STEP_SHFT);

	return 0;

err_undo_napi:
	netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
	put_device(&adpt->phydev->mdio.dev);
	mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
	emac_clks_teardown(adpt);
err_undo_netdev:
	free_netdev(netdev);

	return ret;
}

static int emac_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
	struct emac_adapter *adpt = netdev_priv(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adpt->rx_q.napi);

	emac_clks_teardown(adpt);

	put_device(&adpt->phydev->mdio.dev);
	mdiobus_unregister(adpt->mii_bus);
	free_netdev(netdev);

	if (adpt->phy.digital)
		iounmap(adpt->phy.digital);
	iounmap(adpt->phy.base);

	return 0;
}

static void emac_shutdown(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
	struct emac_adapter *adpt = netdev_priv(netdev);

	if (netdev->flags & IFF_UP) {
		/* Closing the SGMII turns off its interrupts */
		emac_sgmii_close(adpt);

		/* Resetting the MAC turns off all DMA and its interrupts */
		emac_mac_reset(adpt);
	}
}

static struct platform_driver emac_platform_driver = {
	.probe	= emac_probe,
	.remove	= emac_remove,
	.driver = {
		.name		= "qcom-emac",
		.of_match_table = emac_dt_match,
		.acpi_match_table = ACPI_PTR(emac_acpi_match),
	},
	.shutdown = emac_shutdown,
};

module_platform_driver(emac_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");