// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG       (NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_INTR | \
				 NETIF_MSG_TX_DONE | \
				 NETIF_MSG_RX_STATUS | \
				 NETIF_MSG_PKTDATA | \
				 NETIF_MSG_HW | \
				 NETIF_MSG_WOL)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)

/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)

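/* Release RX channel resources in reverse order of allocation: the CPPI5
 * descriptor pool first, then the UDMA glue channel itself. Each resource
 * is checked before being freed, so this is safe to call on a partially
 * initialized channel.
 */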
static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
				   struct prueth_rx_chn *rx_chn,
				   int max_rflows)
{
	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}

static void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* prueth_cleanup_tx_chns() is assumed to be called last,
		 * after all other channel resources have been freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}

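/* Unmap and free a (possibly chained) TX host descriptor. The first
 * descriptor maps the skb linear data (dma_unmap_single()) while any
 * linked host-buffer descriptors map page fragments (dma_unmap_page()).
 * The chain is walked via the next-hbdesc links before every descriptor
 * is returned to the pool.
 */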
static void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
			     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

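/* Reap completed TX descriptors from the completion queue of channel @chn:
 * free the associated skbs, report the completed bytes to BQL via
 * netdev_tx_completed_queue() and, under the TX queue lock, re-wake a
 * stopped queue once enough descriptors are free for a worst-case
 * fragmented skb. A teardown marker descriptor signals channel shutdown
 * and completes tdown_complete.
 */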
static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
				    int budget)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;
	void **swdata;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		skb = *(swdata);
		prueth_xmit_free(tx_chn, desc_tx);

		ndev = skb->dev;
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
		total_bytes += skb->len;
		napi_consume_skb(skb, budget);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	return num_tx;
}

static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets))
		enable_irq(tx_chn->irq);

	return num_tx_packets;
}

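/* Hard IRQ handler for TX completion. The interrupt line stays disabled
 * while NAPI polls; emac_napi_tx_poll() re-enables it once it completes
 * below budget.
 */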
static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

static int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}

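/* Allocate one UDMA TX channel, CPPI5 descriptor pool and IRQ per TX
 * queue. Channel names encode the ICSSG slice ("tx<slice>-<queue>") so
 * the DMA resources of SLICE0 and SLICE1 don't collide.
 */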
static int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq <= 0) {
			ret = -EINVAL;
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}

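/* Set up the RX UDMA channel and its @max_rflows flows. The free
 * descriptor queue (fdq) of flow 0 is created shared and its ring id is
 * reused by all subsequent flows, so every flow refills from the same
 * ring.
 */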
static int prueth_init_rx_chns(struct prueth_emac *emac,
			       struct prueth_rx_chn *rx_chn,
			       char *name, u32 max_rflows,
			       u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	int i, ret = 0, slice;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (rx_chn->irq[i] <= 0) {
			ret = rx_chn->irq[i];
			netdev_err(ndev, "Failed to get rx dma irq\n");
			goto fail;
		}
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}

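/* Attach an skb to a free RX host descriptor and push it to the free
 * queue of flow 0. The whole tailroom is mapped DMA_FROM_DEVICE and the
 * skb pointer is stashed in the descriptor's software data for retrieval
 * on completion.
 */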
static int prueth_dma_rx_push(struct prueth_emac *emac,
			      struct sk_buff *skb,
			      struct prueth_rx_chn *rx_chn)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	u32 pkt_len = skb_tailroom(skb);
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*swdata = skb;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
					desc_rx, desc_dma);
}

static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	void **swdata;
	int ret;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown? */
		return 0;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	skb->dev = ndev;
	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with old skb to prevent a stall
	 */
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	} else {
		/* send the filled skb up the n/w stack */
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&emac->napi_rx, skb);
		ndev->stats.rx_bytes += pkt_len;
		ndev->stats.rx_packets++;
	}

	/* queue another RX DMA */
	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
	if (WARN_ON(ret < 0)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	dev_kfree_skb_any(skb);
}

/**
 * emac_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the system to transmit a packet - we queue the packet in
 * the EMAC hardware transmit queue.
 * Doesn't wait for completion; TX completion is handled in
 * emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	int i, ret = 0, q_idx;
	void **swdata;
	u32 pkt_len;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*swdata = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto drop_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);  /* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

free_desc_stop_q_busy:
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	prueth_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}

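/* Firmware image names for the three cores (PRU, RTU, TX_PRU) of each
 * ICSSG slice; icssg_emac_firmwares[] below is indexed by slice number.
 */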
struct icssg_firmwares {
	char *pru;
	char *rtu;
	char *txpru;
};

static struct icssg_firmwares icssg_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
	}
};

static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int slice, ret;

	firmwares = icssg_emac_firmwares;

	slice = prueth_emac_slice(emac);
	if (slice < 0) {
		netdev_err(emac->ndev, "invalid port\n");
		return -EINVAL;
	}

	ret = icssg_config(prueth, emac, slice);
	if (ret)
		return ret;

	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
	if (ret)
		return ret;

	ret = rproc_boot(prueth->pru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
		return -EINVAL;
	}

	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
	if (ret)
		goto halt_pru;

	ret = rproc_boot(prueth->rtu[slice]);
	if (ret) {
		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
		goto halt_pru;
	}

	ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
	if (ret)
		goto halt_rtu;

	ret = rproc_boot(prueth->txpru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
		goto halt_rtu;
	}

	emac->fw_running = 1;
	return 0;

halt_rtu:
	rproc_shutdown(prueth->rtu[slice]);

halt_pru:
	rproc_shutdown(prueth->pru[slice]);

	return ret;
}

static void prueth_emac_stop(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice;

	switch (emac->port_id) {
	case PRUETH_PORT_MII0:
		slice = ICSS_SLICE0;
		break;
	case PRUETH_PORT_MII1:
		slice = ICSS_SLICE1;
		break;
	default:
		netdev_err(emac->ndev, "invalid port\n");
		return;
	}

	emac->fw_running = 0;
	rproc_shutdown(prueth->txpru[slice]);
	rproc_shutdown(prueth->rtu[slice]);
	rproc_shutdown(prueth->pru[slice]);
}

/* called back by PHY layer if there is a change in link state of hw port */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
	}
}

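/* RX NAPI poll: service flows from the highest-numbered (highest
 * priority) flow down, spending the remaining budget on each. The RX IRQ
 * is re-enabled only when the poll completes under budget.
 */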
static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int flow = PRUETH_MAX_RX_FLOWS;
	int num_rx = 0;
	int cur_budget;
	int ret;

	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = emac_rx_packet(emac, flow);
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
		enable_irq(emac->rx_chns.irq[rx_flow]);

	return num_rx;
}

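/* Pre-fill the RX free queue with chn->descs_num freshly allocated skbs
 * so the hardware has buffers to land packets in before the channel is
 * enabled.
 */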
static int prueth_prepare_rx_chan(struct prueth_emac *emac,
				  struct prueth_rx_chn *chn,
				  int buf_size)
{
	struct sk_buff *skb;
	int i, ret;

	for (i = 0; i < chn->descs_num; i++) {
		skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		ret = prueth_dma_rx_push(emac, skb, chn);
		if (ret < 0) {
			netdev_err(emac->ndev,
				   "cannot submit skb for rx chan %s ret %d\n",
				   chn->name, ret);
			kfree_skb(skb);
			return ret;
		}
	}

	return 0;
}

static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
				 bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}

static void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
				 int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup, !!i);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);
}

static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when the system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* clear SMEM and MSMC settings for all slices */
	if (!prueth->emacs_initialized) {
		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
	}

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	icssg_class_default(prueth->miig_rt, slice, 0);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	/* reset and start PRU firmware */
	ret = prueth_emac_start(prueth, emac);
	if (ret)
		goto free_rx_irq;

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto stop;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_chn;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	queue_work(system_long_wq, &emac->stats_work.work);

	return 0;

reset_tx_chan:
	/* Since the interface is not yet up, there wouldn't be
	 * any SKBs pending completion, so pass free_skb as false
	 */
	prueth_reset_tx_chan(emac, i, false);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
stop:
	prueth_emac_stop(emac);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when the system wants to stop or bring down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_disable(&emac->tx_chns[i].napi_tx);

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);

	napi_disable(&emac->napi_rx);

	cancel_work_sync(&emac->rx_mode_work);

	/* Destroying the queued work in ndo_stop() */
	cancel_delayed_work_sync(&emac->stats_work);

	/* stop PRUs */
	prueth_emac_stop(emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
	prueth_cleanup_tx_chns(emac);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);

	prueth->emacs_initialized--;

	return 0;
}

static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}

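/* Deferred RX mode programming. ndo_set_rx_mode runs in atomic context
 * but the port state commands can sleep, so the IFF_PROMISC/IFF_ALLMULTI
 * flags are translated to UC/MC flooding states here in workqueue
 * context.
 */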
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (!netdev_mc_empty(ndev)) {
		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}
}

/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when the system wants to set the receive mode of the device.
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}

static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	return phy_do_ioctl(ndev, ifr, cmd);
}

static void emac_ndo_get_stats64(struct net_device *ndev,
				 struct rtnl_link_stats64 *stats)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	emac_update_hardware_stats(emac);

	stats->rx_packets     = emac_get_stat_by_name(emac, "rx_packets");
	stats->rx_bytes       = emac_get_stat_by_name(emac, "rx_bytes");
	stats->tx_packets     = emac_get_stat_by_name(emac, "tx_packets");
	stats->tx_bytes       = emac_get_stat_by_name(emac, "tx_bytes");
	stats->rx_crc_errors  = emac_get_stat_by_name(emac, "rx_crc_errors");
	stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
	stats->multicast      = emac_get_stat_by_name(emac, "rx_multicast_frames");

	stats->rx_errors  = ndev->stats.rx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_errors  = ndev->stats.tx_errors;
	stats->tx_dropped = ndev->stats.tx_dropped;
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = emac_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = emac_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = emac_ndo_ioctl,
	.ndo_get_stats64 = emac_ndo_get_stats64,
};

/* get emac_port corresponding to eth_node name */
static int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}

/* get MAC instance corresponding to eth_node name */
static int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}

static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported\n");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features;

	netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

static void prueth_netdev_exit(struct prueth *prueth,
			       struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);

	pruss_release_mem_region(prueth->pruss, &emac->dram);
	destroy_workqueue(emac->cmd_wq);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}

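/* Look up the three rproc cores of a slice. The "ti,prus" phandle list
 * in the device tree is expected to hold PRU, RTU and TX_PRU for SLICE0
 * at indices 0-2 and for SLICE1 at indices 3-5, which is why the base
 * index below is 0 or 3.
 */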
static int prueth_get_cores(struct prueth *prueth, int slice)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}

static void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}

static const struct of_device_id prueth_dt_match[];

static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node  *eth0_node = NULL;
	struct device_node  *eth1_node = NULL;
	struct genpool_data_align gp_data = {
		.align = SZ_64K,
	};
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	match = of_match_device(prueth_dt_match, dev);
	if (!match)
		return -ENODEV;

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)match->data;

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
			continue;
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	if (eth0_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0);
		if (ret)
			goto put_cores;
	}

	if (eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE1);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		pruss_put(prueth->pruss);
		goto put_cores;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	msmc_ram_size = MSMC_RAM_SIZE;

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto netdev_exit;
		}
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0\n");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1\n");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");

	if (eth1_node)
		of_node_put(eth1_node);
	if (eth0_node)
		of_node_put(eth0_node);
	return 0;

netdev_unregister:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		if (prueth->emac[i]->ndev->phydev) {
			phy_disconnect(prueth->emac[i]->ndev->phydev);
			prueth->emac[i]->ndev->phydev = NULL;
		}
		unregister_netdev(prueth->registered_netdevs[i]);
	}

netdev_exit:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va, msmc_ram_size);

put_mem:
	pruss_release_mem_region(prueth->pruss, &prueth->shram);
	pruss_put(prueth->pruss);

put_cores:
	if (eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	if (eth0_node) {
		prueth_put_cores(prueth, ICSS_SLICE0);
		of_node_put(eth0_node);
	}

	return ret;
}

static void prueth_remove(struct platform_device *pdev)
{
	struct prueth *prueth = platform_get_drvdata(pdev);
	struct device_node *eth_node;
	int i;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		phy_stop(prueth->emac[i]->ndev->phydev);
		phy_disconnect(prueth->emac[i]->ndev->phydev);
		prueth->emac[i]->ndev->phydev = NULL;
		unregister_netdev(prueth->registered_netdevs[i]);
	}

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      MSMC_RAM_SIZE);

	pruss_release_mem_region(prueth->pruss, &prueth->shram);

	pruss_put(prueth->pruss);

	if (prueth->eth_node[PRUETH_MAC1])
		prueth_put_cores(prueth, ICSS_SLICE1);

	if (prueth->eth_node[PRUETH_MAC0])
		prueth_put_cores(prueth, ICSS_SLICE0);
}

static const struct prueth_pdata am654_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
	.quirk_10m_link_issue = 1,
};

static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);

static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove_new = prueth_remove,
	.driver = {
		.name = "icssg-prueth",
		.of_match_table = prueth_dt_match,
	},
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");