// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
#include "../k3-cppi-desc-pool.h"

#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG       (NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_INTR | \
				 NETIF_MSG_TX_DONE | \
				 NETIF_MSG_RX_STATUS | \
				 NETIF_MSG_PKTDATA | \
				 NETIF_MSG_HW | \
				 NETIF_MSG_WOL)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)

/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE                BIT(24)

static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
				   struct prueth_rx_chn *rx_chn,
				   int max_rflows)
{
	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}

static void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* Assume prueth_cleanup_tx_chns() is called at the
		 * end after all channel resources are freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}

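/* Unmap and free a (possibly chained) set of TX host descriptors.
 * The first descriptor maps the skb linear area (dma_unmap_single());
 * any linked host buffer descriptors map page fragments
 * (dma_unmap_page()). All descriptors are returned to the pool.
 */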
static void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
			     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

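/* Reap completed TX descriptors from the completion queue: free the
 * descriptor chains, update stats and BQL, and re-wake the TX queue
 * if it was stopped and enough descriptors are available again. A
 * teardown completion marker (TDCM) instead signals channel shutdown
 * and wakes up the waiter on tdown_complete.
 */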
static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
				    int budget)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;
	void **swdata;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		skb = *(swdata);
		prueth_xmit_free(tx_chn, desc_tx);

		ndev = skb->dev;
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
		total_bytes += skb->len;
		napi_consume_skb(skb, budget);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	return num_tx;
}

static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets))
		enable_irq(tx_chn->irq);

	return num_tx_packets;
}

static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

static int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}

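/* Request the TX DMA channels (named "tx<slice>-<id>", presumably to
 * match the DT dma-names entries), create their descriptor pools and
 * fetch their completion IRQs. Any failure unwinds everything set up
 * so far via prueth_cleanup_tx_chns().
 */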
static int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq <= 0) {
			ret = -EINVAL;
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}

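/* Set up the RX DMA channel and its flows. The free descriptor queue
 * (FDQ) ring of flow 0 is marked K3_RINGACC_RING_SHARED and its id is
 * reused as the FDQ for the remaining flows, so all flows refill from
 * a common ring.
 */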
static int prueth_init_rx_chns(struct prueth_emac *emac,
			       struct prueth_rx_chn *rx_chn,
			       char *name, u32 max_rflows,
			       u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	int i, ret = 0, slice;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (rx_chn->irq[i] <= 0) {
			ret = rx_chn->irq[i];
			netdev_err(ndev, "Failed to get rx dma irq\n");
			goto fail;
		}
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}

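/* Attach a fresh skb to an RX host descriptor and push it to free
 * queue 0 for the hardware to fill. The skb pointer is parked in the
 * descriptor's software data area so it can be retrieved on
 * completion.
 */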
static int prueth_dma_rx_push(struct prueth_emac *emac,
			      struct sk_buff *skb,
			      struct prueth_rx_chn *rx_chn)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	u32 pkt_len = skb_tailroom(skb);
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*swdata = skb;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
					desc_rx, desc_dma);
}

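/* Pop one completed RX descriptor, pass the filled skb up the stack
 * via GRO and immediately queue a replacement buffer. If the
 * replacement allocation fails, the received packet is dropped and
 * its skb is recycled back to the ring so the flow doesn't stall.
 */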
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	void **swdata;
	int ret;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
		return 0;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	skb->dev = ndev;
	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with old skb to prevent a stall
	 */
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	} else {
		/* send the filled skb up the n/w stack */
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&emac->napi_rx, skb);
		ndev->stats.rx_bytes += pkt_len;
		ndev->stats.rx_packets++;
	}

	/* queue another RX DMA */
	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
	if (WARN_ON(ret < 0)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

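/* k3_udma_glue_reset_rx_chn() callback: reclaim one in-flight RX
 * descriptor during channel reset, unmapping its buffer and freeing
 * the parked skb.
 */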
static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	dev_kfree_skb_any(skb);
}

/**
 * emac_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the system to transmit a packet - we queue the packet in
 * the EMAC hardware transmit queue.
 * Doesn't wait for completion; TX completion is checked later in
 * emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	int i, ret = 0, q_idx;
	void **swdata;
	u32 pkt_len;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*swdata = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto drop_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);  /* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue is visible to other CPUs */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

free_desc_stop_q_busy:
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

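/* k3_udma_glue_reset_tx_chn() callback: drop one in-flight TX
 * descriptor during channel reset, freeing its chain and skb.
 */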
static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	prueth_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}

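/* Firmware image names, indexed by slice: [0] is SLICE0, [1] is
 * SLICE1. Each slice runs three cores: PRU, RTU and TX_PRU.
 */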
struct icssg_firmwares {
	char *pru;
	char *rtu;
	char *txpru;
};

static struct icssg_firmwares icssg_emac_firmwares[] = {
	{
		.pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
	},
	{
		.pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
		.rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
		.txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
	}
};

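/* Configure a slice and boot its cores in order: PRU, then RTU, then
 * TX_PRU. On failure, the cores booted so far are shut down in
 * reverse order.
 */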
static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
{
	struct icssg_firmwares *firmwares;
	struct device *dev = prueth->dev;
	int slice, ret;

	firmwares = icssg_emac_firmwares;

	slice = prueth_emac_slice(emac);
	if (slice < 0) {
		netdev_err(emac->ndev, "invalid port\n");
		return -EINVAL;
	}

	ret = icssg_config(prueth, emac, slice);
	if (ret)
		return ret;

	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
	if (ret)
		return ret;

	ret = rproc_boot(prueth->pru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
		return -EINVAL;
	}

	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
	if (ret)
		goto halt_pru;

	ret = rproc_boot(prueth->rtu[slice]);
	if (ret) {
		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
		goto halt_pru;
	}

	ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
	if (ret)
		goto halt_rtu;

	ret = rproc_boot(prueth->txpru[slice]);
	if (ret) {
		dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
		goto halt_rtu;
	}

	emac->fw_running = 1;
	return 0;

halt_rtu:
	rproc_shutdown(prueth->rtu[slice]);

halt_pru:
	rproc_shutdown(prueth->pru[slice]);

	return ret;
}

static void prueth_emac_stop(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice;

	switch (emac->port_id) {
	case PRUETH_PORT_MII0:
		slice = ICSS_SLICE0;
		break;
	case PRUETH_PORT_MII1:
		slice = ICSS_SLICE1;
		break;
	default:
		netdev_err(emac->ndev, "invalid port\n");
		return;
	}

	emac->fw_running = 0;
	rproc_shutdown(prueth->txpru[slice]);
	rproc_shutdown(prueth->rtu[slice]);
	rproc_shutdown(prueth->pru[slice]);
}

/* called back by PHY layer if there is a change in link state of HW port */
static void emac_adjust_link(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	struct prueth *prueth = emac->prueth;
	bool new_state = false;
	unsigned long flags;

	if (phydev->link) {
		/* check the mode of operation - full/half duplex */
		if (phydev->duplex != emac->duplex) {
			new_state = true;
			emac->duplex = phydev->duplex;
		}
		if (phydev->speed != emac->speed) {
			new_state = true;
			emac->speed = phydev->speed;
		}
		if (!emac->link) {
			new_state = true;
			emac->link = 1;
		}
	} else if (emac->link) {
		new_state = true;
		emac->link = 0;

		/* f/w should support 100 & 1000 */
		emac->speed = SPEED_1000;

		/* half duplex may not be supported by f/w */
		emac->duplex = DUPLEX_FULL;
	}

	if (new_state) {
		phy_print_status(phydev);

		/* update RGMII and MII configuration based on PHY negotiated
		 * values
		 */
		if (emac->link) {
			/* Set the RGMII cfg for gig en and full duplex */
			icssg_update_rgmii_cfg(prueth->miig_rt, emac);

			/* update the Tx IPG based on 100M/1G speed */
			spin_lock_irqsave(&emac->lock, flags);
			icssg_config_ipg(emac);
			spin_unlock_irqrestore(&emac->lock, flags);
			icssg_config_set_speed(emac);
			emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);

		} else {
			emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
		}
	}

	if (emac->link) {
		/* reactivate the transmit queue */
		netif_tx_wake_all_queues(ndev);
	} else {
		netif_tx_stop_all_queues(ndev);
	}
}

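/* RX NAPI poll: service the flows from the highest index down,
 * sharing the budget between them. The data-flow IRQ is re-enabled
 * only once we complete under budget.
 */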
static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int flow = PRUETH_MAX_RX_FLOWS;
	int num_rx = 0;
	int cur_budget;
	int ret;

	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = emac_rx_packet(emac, flow);
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
		enable_irq(emac->rx_chns.irq[rx_flow]);

	return num_rx;
}

static int prueth_prepare_rx_chan(struct prueth_emac *emac,
				  struct prueth_rx_chn *chn,
				  int buf_size)
{
	struct sk_buff *skb;
	int i, ret;

	for (i = 0; i < chn->descs_num; i++) {
		skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		ret = prueth_dma_rx_push(emac, skb, chn);
		if (ret < 0) {
			netdev_err(emac->ndev,
				   "cannot submit skb for rx chan %s ret %d\n",
				   chn->name, ret);
			kfree_skb(skb);
			return ret;
		}
	}

	return 0;
}

static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
				 bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}

static void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
				 int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup, !!i);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);
}

static int emac_phy_connect(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	struct net_device *ndev = emac->ndev;
	/* connect PHY */
	ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
				      &emac_adjust_link, 0,
				      emac->phy_if);
	if (!ndev->phydev) {
		dev_err(prueth->dev, "couldn't connect to phy %s\n",
			emac->phy_node->full_name);
		return -ENODEV;
	}

	/* remove unsupported modes */
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);

	if (emac->phy_if == PHY_INTERFACE_MODE_MII)
		phy_set_max_speed(ndev->phydev, SPEED_100);

	return 0;
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
 *
 * Called when system wants to start the interface.
 *
 * Return: 0 for a successful open, or appropriate error code
 */
static int emac_ndo_open(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret, i, num_data_chn = emac->tx_ch_num;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	struct device *dev = prueth->dev;
	int max_rx_flows;
	int rx_flow;

	/* clear SMEM and MSMC settings for all slices */
	if (!prueth->emacs_initialized) {
		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
	}

	/* set h/w MAC as user might have re-configured */
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);

	icssg_class_default(prueth->miig_rt, slice, 0);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
	if (ret) {
		dev_err(dev, "cannot set real number of tx queues\n");
		return ret;
	}

	init_completion(&emac->cmd_complete);
	ret = prueth_init_tx_chns(emac);
	if (ret) {
		dev_err(dev, "failed to init tx channel: %d\n", ret);
		return ret;
	}

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
				  max_rx_flows, PRUETH_MAX_RX_DESC);
	if (ret) {
		dev_err(dev, "failed to init rx channel: %d\n", ret);
		goto cleanup_tx;
	}

	ret = prueth_ndev_add_tx_napi(emac);
	if (ret)
		goto cleanup_rx;

	/* we use only the highest priority flow for now i.e. @irq[3] */
	rx_flow = PRUETH_RX_FLOW_DATA;
	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
	if (ret) {
		dev_err(dev, "unable to request RX IRQ\n");
		goto cleanup_napi;
	}

	/* reset and start PRU firmware */
	ret = prueth_emac_start(prueth, emac);
	if (ret)
		goto free_rx_irq;

	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);

	/* Prepare RX */
	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
	if (ret)
		goto stop;

	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
	if (ret)
		goto reset_rx_chn;

	for (i = 0; i < emac->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
		if (ret)
			goto reset_tx_chan;
	}

	/* Enable NAPI in Tx and Rx direction */
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_enable(&emac->tx_chns[i].napi_tx);
	napi_enable(&emac->napi_rx);

	/* start PHY */
	phy_start(ndev->phydev);

	prueth->emacs_initialized++;

	return 0;

reset_tx_chan:
	/* Since the interface is not yet up, there wouldn't be
	 * any SKB for completion. So set free_skb to false.
	 */
	prueth_reset_tx_chan(emac, i, false);
reset_rx_chn:
	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
stop:
	prueth_emac_stop(emac);
free_rx_irq:
	free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
cleanup_rx:
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
cleanup_tx:
	prueth_cleanup_tx_chns(emac);

	return ret;
}

/**
 * emac_ndo_stop - EMAC device stop
 * @ndev: network adapter device
 *
 * Called when system wants to stop or down the interface.
 *
 * Return: Always 0 (Success)
 */
static int emac_ndo_stop(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int rx_flow = PRUETH_RX_FLOW_DATA;
	int max_rx_flows;
	int ret, i;

	/* inform the upper layers. */
	netif_tx_stop_all_queues(ndev);

	/* block packets from wire */
	if (ndev->phydev)
		phy_stop(ndev->phydev);

	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));

	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	/* tear down and disable UDMA channels */
	reinit_completion(&emac->tdown_complete);
	for (i = 0; i < emac->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);

	ret = wait_for_completion_timeout(&emac->tdown_complete,
					  msecs_to_jiffies(1000));
	if (!ret)
		netdev_err(ndev, "tx teardown timeout\n");

	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
	for (i = 0; i < emac->tx_ch_num; i++)
		napi_disable(&emac->tx_chns[i].napi_tx);

	max_rx_flows = PRUETH_MAX_RX_FLOWS;
	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);

	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);

	napi_disable(&emac->napi_rx);

	cancel_work_sync(&emac->rx_mode_work);

	/* stop PRUs */
	prueth_emac_stop(emac);

	free_irq(emac->rx_chns.irq[rx_flow], emac);
	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);

	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
	prueth_cleanup_tx_chns(emac);

	prueth->emacs_initialized--;

	return 0;
}

static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}

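/* Applied from a workqueue, presumably because the port-state
 * commands sent to the firmware can sleep while ndo_set_rx_mode runs
 * in atomic context. Flooding is first disabled, then selectively
 * re-enabled for the promisc/allmulti/multicast cases.
 */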
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
	struct net_device *ndev = emac->ndev;
	bool promisc, allmulti;

	if (!netif_running(ndev))
		return;

	promisc = ndev->flags & IFF_PROMISC;
	allmulti = ndev->flags & IFF_ALLMULTI;
	emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
	emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);

	if (promisc) {
		emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (allmulti) {
		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}

	if (!netdev_mc_empty(ndev)) {
		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
		return;
	}
}

/**
 * emac_ndo_set_rx_mode - EMAC set receive mode function
 * @ndev: The EMAC network adapter
 *
 * Called when system wants to set the receive mode of the device.
 *
 */
static void emac_ndo_set_rx_mode(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	queue_work(emac->cmd_wq, &emac->rx_mode_work);
}

static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	return phy_do_ioctl(ndev, ifr, cmd);
}

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_ndo_open,
	.ndo_stop = emac_ndo_stop,
	.ndo_start_xmit = emac_ndo_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = emac_ndo_tx_timeout,
	.ndo_set_rx_mode = emac_ndo_set_rx_mode,
	.ndo_eth_ioctl = emac_ndo_ioctl,
};

/* get emac_port corresponding to eth_node name */
static int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}

/* get MAC instance corresponding to eth_node name */
static int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}

static int prueth_netdev_init(struct prueth *prueth,
			      struct device_node *eth_node)
{
	int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
	struct prueth_emac *emac;
	struct net_device *ndev;
	enum prueth_port port;
	enum prueth_mac mac;

	port = prueth_node_port(eth_node);
	if (port == PRUETH_PORT_INVALID)
		return -EINVAL;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return -EINVAL;

	ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
	if (!ndev)
		return -ENOMEM;

	emac = netdev_priv(ndev);
	emac->prueth = prueth;
	emac->ndev = ndev;
	emac->port_id = port;
	emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
	if (!emac->cmd_wq) {
		ret = -ENOMEM;
		goto free_ndev;
	}
	INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);

	ret = pruss_request_mem_region(prueth->pruss,
				       port == PRUETH_PORT_MII0 ?
				       PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
				       &emac->dram);
	if (ret) {
		dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
		ret = -ENOMEM;
		goto free_wq;
	}

	emac->tx_ch_num = 1;

	SET_NETDEV_DEV(ndev, prueth->dev);
	spin_lock_init(&emac->lock);
	mutex_init(&emac->cmd_lock);

	emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
	if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
		dev_err(prueth->dev, "couldn't find phy-handle\n");
		ret = -ENODEV;
		goto free;
	} else if (of_phy_is_fixed_link(eth_node)) {
		ret = of_phy_register_fixed_link(eth_node);
		if (ret) {
			ret = dev_err_probe(prueth->dev, ret,
					    "failed to register fixed-link phy\n");
			goto free;
		}

		emac->phy_node = eth_node;
	}

	ret = of_get_phy_mode(eth_node, &emac->phy_if);
	if (ret) {
		dev_err(prueth->dev, "could not get phy-mode property\n");
		goto free;
	}

	if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
	    !phy_interface_mode_is_rgmii(emac->phy_if)) {
		dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
		ret = -EINVAL;
		goto free;
	}

	/* AM65 SR2.0 has TX Internal delay always enabled by hardware
	 * and it is not possible to disable TX Internal delay. The below
	 * switch case block describes how we handle different phy modes
	 * based on hardware restriction.
	 */
	switch (emac->phy_if) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		emac->phy_if = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
		ret = -EINVAL;
		goto free;
	default:
		break;
	}

	/* get mac address from DT and set private and netdev addr */
	ret = of_get_ethdev_address(eth_node, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
			 port, ndev->dev_addr);
	}
	ether_addr_copy(emac->mac_addr, ndev->dev_addr);

	ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
	ndev->max_mtu = PRUETH_MAX_MTU;
	ndev->netdev_ops = &emac_netdev_ops;
	ndev->hw_features = NETIF_F_SG;
	ndev->features = ndev->hw_features;

	netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
	prueth->emac[mac] = emac;

	return 0;

free:
	pruss_release_mem_region(prueth->pruss, &emac->dram);
free_wq:
	destroy_workqueue(emac->cmd_wq);
free_ndev:
	emac->ndev = NULL;
	prueth->emac[mac] = NULL;
	free_netdev(ndev);

	return ret;
}

static void prueth_netdev_exit(struct prueth *prueth,
			       struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);

	pruss_release_mem_region(prueth->pruss, &emac->dram);
	destroy_workqueue(emac->cmd_wq);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}

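/* Acquire the three PRU cores of a slice from the node's PRU phandle
 * list: entries 0-2 are assumed to be SLICE0's PRU, RTU and TX_PRU,
 * and entries 3-5 SLICE1's.
 */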
static int prueth_get_cores(struct prueth *prueth, int slice)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}

static void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}

static const struct of_device_id prueth_dt_match[];

static int prueth_probe(struct platform_device *pdev)
{
	struct device_node *eth_node, *eth_ports_node;
	struct device_node  *eth0_node = NULL;
	struct device_node  *eth1_node = NULL;
	struct genpool_data_align gp_data = {
		.align = SZ_64K,
	};
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct device_node *np;
	struct prueth *prueth;
	struct pruss *pruss;
	u32 msmc_ram_size;
	int i, ret;

	np = dev->of_node;

	match = of_match_device(prueth_dt_match, dev);
	if (!match)
		return -ENODEV;

	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
	if (!prueth)
		return -ENOMEM;

	dev_set_drvdata(dev, prueth);
	prueth->pdev = pdev;
	prueth->pdata = *(const struct prueth_pdata *)match->data;

	prueth->dev = dev;
	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
	if (!eth_ports_node)
		return -ENOENT;

	for_each_child_of_node(eth_ports_node, eth_node) {
		u32 reg;

		if (strcmp(eth_node->name, "port"))
			continue;
		ret = of_property_read_u32(eth_node, "reg", &reg);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				eth_node, ret);
			continue;
		}

		of_node_get(eth_node);

		if (reg == 0) {
			eth0_node = eth_node;
			if (!of_device_is_available(eth0_node)) {
				of_node_put(eth0_node);
				eth0_node = NULL;
			}
		} else if (reg == 1) {
			eth1_node = eth_node;
			if (!of_device_is_available(eth1_node)) {
				of_node_put(eth1_node);
				eth1_node = NULL;
			}
		} else {
			dev_err(dev, "port reg should be 0 or 1\n");
		}
	}

	of_node_put(eth_ports_node);

	/* At least one node must be present and available else we fail */
	if (!eth0_node && !eth1_node) {
		dev_err(dev, "neither port0 nor port1 node available\n");
		return -ENODEV;
	}

	if (eth0_node == eth1_node) {
		dev_err(dev, "port0 and port1 can't have same reg\n");
		of_node_put(eth0_node);
		return -ENODEV;
	}

	prueth->eth_node[PRUETH_MAC0] = eth0_node;
	prueth->eth_node[PRUETH_MAC1] = eth1_node;

	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
	if (IS_ERR(prueth->miig_rt)) {
		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
		return -ENODEV;
	}

	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
	if (IS_ERR(prueth->mii_rt)) {
		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
		return -ENODEV;
	}

	if (eth0_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0);
		if (ret)
			goto put_cores;
	}

	if (eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE1);
		if (ret)
			goto put_cores;
	}

	pruss = pruss_get(eth0_node ?
			  prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
	if (IS_ERR(pruss)) {
		ret = PTR_ERR(pruss);
		dev_err(dev, "unable to get pruss handle\n");
		goto put_cores;
	}

	prueth->pruss = pruss;

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
				       &prueth->shram);
	if (ret) {
		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
		pruss_put(prueth->pruss);
		goto put_cores;
	}

	prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
	if (!prueth->sram_pool) {
		dev_err(dev, "unable to get SRAM pool\n");
		ret = -ENODEV;

		goto put_mem;
	}

	msmc_ram_size = MSMC_RAM_SIZE;

	/* NOTE: FW bug needs buffer base to be 64KB aligned */
	prueth->msmcram.va =
		(void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
						    msmc_ram_size,
						    gen_pool_first_fit_align,
						    &gp_data);

	if (!prueth->msmcram.va) {
		ret = -ENOMEM;
		dev_err(dev, "unable to allocate MSMC resource\n");
		goto put_mem;
	}
	prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
						   (unsigned long)prueth->msmcram.va);
	prueth->msmcram.size = msmc_ram_size;
	memset_io(prueth->msmcram.va, 0, msmc_ram_size);
	dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
		prueth->msmcram.va, prueth->msmcram.size);

	/* setup netdev interfaces */
	if (eth0_node) {
		ret = prueth_netdev_init(prueth, eth0_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth0_node->name);
			goto netdev_exit;
		}
	}

	if (eth1_node) {
		ret = prueth_netdev_init(prueth, eth1_node);
		if (ret) {
			dev_err_probe(dev, ret, "netdev init %s failed\n",
				      eth1_node->name);
			goto netdev_exit;
		}
	}

	/* register the network devices */
	if (eth0_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII0");
			goto netdev_exit;
		}

		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;

		emac_phy_connect(prueth->emac[PRUETH_MAC0]);
		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
	}

	if (eth1_node) {
		ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
		if (ret) {
			dev_err(dev, "can't register netdev for port MII1");
			goto netdev_unregister;
		}

		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
		emac_phy_connect(prueth->emac[PRUETH_MAC1]);
		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
	}

	dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
		 (!eth0_node || !eth1_node) ? "single" : "dual");

	if (eth1_node)
		of_node_put(eth1_node);
	if (eth0_node)
		of_node_put(eth0_node);
	return 0;

netdev_unregister:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		if (prueth->emac[i]->ndev->phydev) {
			phy_disconnect(prueth->emac[i]->ndev->phydev);
			prueth->emac[i]->ndev->phydev = NULL;
		}
		unregister_netdev(prueth->registered_netdevs[i]);
	}

netdev_exit:
	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va, msmc_ram_size);

put_mem:
	pruss_release_mem_region(prueth->pruss, &prueth->shram);
	pruss_put(prueth->pruss);

put_cores:
	if (eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	if (eth0_node) {
		prueth_put_cores(prueth, ICSS_SLICE0);
		of_node_put(eth0_node);
	}

	return ret;
}

static void prueth_remove(struct platform_device *pdev)
{
	struct prueth *prueth = platform_get_drvdata(pdev);
	struct device_node *eth_node;
	int i;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		if (!prueth->registered_netdevs[i])
			continue;
		phy_stop(prueth->emac[i]->ndev->phydev);
		phy_disconnect(prueth->emac[i]->ndev->phydev);
		prueth->emac[i]->ndev->phydev = NULL;
		unregister_netdev(prueth->registered_netdevs[i]);
	}

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		eth_node = prueth->eth_node[i];
		if (!eth_node)
			continue;

		prueth_netdev_exit(prueth, eth_node);
	}

	gen_pool_free(prueth->sram_pool,
		      (unsigned long)prueth->msmcram.va,
		      MSMC_RAM_SIZE);

	pruss_release_mem_region(prueth->pruss, &prueth->shram);

	pruss_put(prueth->pruss);

	if (prueth->eth_node[PRUETH_MAC1])
		prueth_put_cores(prueth, ICSS_SLICE1);

	if (prueth->eth_node[PRUETH_MAC0])
		prueth_put_cores(prueth, ICSS_SLICE0);
}

static const struct prueth_pdata am654_icssg_pdata = {
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
	.quirk_10m_link_issue = 1,
};

static const struct of_device_id prueth_dt_match[] = {
	{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);

static struct platform_driver prueth_driver = {
	.probe = prueth_probe,
	.remove_new = prueth_remove,
	.driver = {
		.name = "icssg-prueth",
		.of_match_table = prueth_dt_match,
	},
};
module_platform_driver(prueth_driver);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
MODULE_LICENSE("GPL");