// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

/* The buf includes headroom compatible with both skb and xdpf */
#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
#define CPSW_HEADROOM  ALIGN(CPSW_HEADROOM_NA, sizeof(long))

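/* Run (func) for each slave port: in dual_emac mode only for the slave
 * bound to this netdev (priv->emac_port), otherwise for all slaves.
 */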
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)

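/* Per-buffer metadata (struct cpsw_meta_xdp below) is kept in the buffer
 * headroom right behind struct xdp_frame, hence this long-aligned offset.
 */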
#define CPSW_XMETA_OFFSET	ALIGN(sizeof(struct xdp_frame), sizeof(long))

#define CPSW_XDP_CONSUMED		1
#define CPSW_XDP_PASS			0

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface affects both
		 * interfaces, as they share the same underlying hardware
		 * resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

/**
 * cpsw_set_mc - add a multicast entry to the ALE table, or delete it
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_port = -1;

	if (cpsw->data.dual_emac)
		slave_port = priv->emac_port + 1;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale,
			      ndev->flags & IFF_ALLMULTI, slave_port);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

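/* A cpdma token is either an sk_buff or an xdp_frame pointer. Bit 0 of
 * the token tags xdp_frame handles; this works because both pointers are
 * at least 2-byte aligned, so bit 0 is otherwise always zero.
 */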
static int cpsw_is_xdpf_handle(void *handle)
{
	return (unsigned long)handle & BIT(0);
}

static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
{
	return (void *)((unsigned long)xdpf | BIT(0));
}

static struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
{
	return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
}

struct __aligned(sizeof(long)) cpsw_meta_xdp {
	struct net_device *ndev;
	int ch;
};

void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp	*xmeta;
	struct xdp_frame	*xdpf;
	struct net_device	*ndev;
	struct netdev_queue	*txq;
	struct sk_buff		*skb;
	int			ch;

	if (cpsw_is_xdpf_handle(token)) {
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* Check whether the queue is stopped due to stalled tx dma; if it
	 * is, wake it up, as we now have a free tx descriptor.
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}

static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

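/* Transmit an xdp_frame. Frames backed by a local page-pool page (XDP_TX)
 * are already DMA-mapped, so the pool's DMA address is reused; frames
 * without a backing page (as used by the ndo_xdp_xmit path) are submitted
 * unmapped and cpdma maps them itself.
 */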
static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
			     struct page *page)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret, port;

	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

	port = priv->emac_port + cpsw->data.dual_emac;
	if (page) {
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		if (sizeof(*xmeta) > xdpf->headroom) {
			xdp_return_frame_rx_napi(xdpf);
			return -EINVAL;
		}

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret) {
		priv->ndev->stats.tx_dropped++;
		xdp_return_frame_rx_napi(xdpf);
	}

	return ret;
}

static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
			struct page *page)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	rcu_read_lock();

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		ret = CPSW_XDP_PASS;
		goto out;
	}

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		cpsw_xdp_tx_frame(priv, xdpf, page);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware share a common queue, so the incoming
		 * device might change per packet.
		 */
		xdp_do_flush_map();
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		goto drop;
	}
out:
	rcu_read_unlock();
	return ret;
drop:
	rcu_read_unlock();
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}

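/* Total buffer length build_skb() needs: reserved headroom, the maximum
 * packet size and the trailing skb_shared_info, with the usual alignment.
 */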
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
	len += CPSW_HEADROOM;
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return SKB_DATA_ALIGN(len);
}

static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
					       int size)
{
	struct page_pool_params pp_params;
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = size;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = cpsw->dev;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		dev_err(cpsw->dev, "cannot create rx page pool\n");

	return pool;
}

static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int ret;

	pool = cpsw->page_pool[ch];
	rxq = &priv->xdp_rxq[ch];

	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
	struct page_pool *pool;
	int ret = 0, pool_size;

	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
	pool = cpsw_create_page_pool(cpsw, pool_size);
	if (IS_ERR(pool))
		ret = PTR_ERR(pool);
	else
		cpsw->page_pool[ch] = pool;

	return ret;
}

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
		}

		page_pool_destroy(cpsw->page_pool[ch]);
		cpsw->page_pool[ch] = NULL;
	}
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* using the same page pool is safe because the rx handlers
		 * never run simultaneously for both ndevs
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}

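/* RX completion handler: resolve the destination ndev from the source
 * port (dual_emac), run the attached XDP program if any, otherwise wrap
 * the buffer into an skb via build_skb(), and finally requeue a fresh
 * page-pool page to the channel.
 */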
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct page		*new_page, *page = token;
	void			*pa = page_address(page);
	struct cpsw_meta_xdp	*xmeta = pa + CPSW_XMETA_OFFSET;
	struct cpsw_common	*cpsw = ndev_to_cpsw(xmeta->ndev);
	int			pkt_size = cpsw->rx_packet_max;
	int			ret = 0, port, ch = xmeta->ch;
	int			headroom = CPSW_HEADROOM;
	struct net_device	*ndev = xmeta->ndev;
	struct cpsw_priv	*priv;
	struct page_pool	*pool;
	struct sk_buff		*skb;
	struct xdp_buff		xdp;
	dma_addr_t		dma;

	if (cpsw->data.dual_emac && status >= 0) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port)
			ndev = cpsw->slaves[--port].ndev;
	}

	priv = netdev_priv(ndev);
	pool = cpsw->page_pool[ch];
	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for an interface which is
			 * already down while the other interface is up and
			 * running. Instead of freeing the page (which would
			 * shrink the number of rx descriptors in the DMA
			 * engine), requeue it back to cpdma.
			 */
			new_page = page;
			goto requeue;
		}

		/* the interface is going down, pages are purged */
		page_pool_recycle_direct(pool, page);
		return;
	}

	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	if (priv->xdp_prog) {
		if (status & CPDMA_RX_VLAN_ENCAP) {
			xdp.data = pa + CPSW_HEADROOM +
				   CPSW_RX_VLAN_ENCAP_HDR_SIZE;
			xdp.data_end = xdp.data + len -
				       CPSW_RX_VLAN_ENCAP_HDR_SIZE;
		} else {
			xdp.data = pa + CPSW_HEADROOM;
			xdp.data_end = xdp.data + len;
		}

		xdp_set_data_meta_invalid(&xdp);

		xdp.data_hard_start = pa;
		xdp.rxq = &priv->xdp_rxq[ch];

		ret = cpsw_run_xdp(priv, ch, &xdp, page);
		if (ret != CPSW_XDP_PASS)
			goto requeue;

		/* XDP prog might have changed packet data and boundaries */
		len = xdp.data_end - xdp.data;
		headroom = xdp.data - xdp.data_hard_start;

		/* XDP prog can modify vlan tag, so can't use encap header */
		status &= ~CPDMA_RX_VLAN_ENCAP;
	}

	/* pass skb to netstack if no XDP prog or returned XDP_PASS */
	skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb_reserve(skb, headroom);
	skb_put(skb, len);
	skb->dev = ndev;
	if (status & CPDMA_RX_VLAN_ENCAP)
		cpsw_rx_vlan_encap(skb);
	if (priv->rx_ts_enabled)
		cpts_rx_timestamp(cpsw->cpts, skb);
	skb->protocol = eth_type_trans(skb, ndev);

	/* unmap page as no netstack skb page recycling */
	page_pool_release_page(pool, page);
	netif_receive_skb(skb);

	ndev->stats.rx_bytes += len;
	ndev->stats.rx_packets++;

requeue:
	xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
	xmeta->ndev = ndev;
	xmeta->ch = ch;

	dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
				       pkt_size, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOMEM);
		page_pool_recycle_direct(pool, new_page);
	}
}

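/* Split the NAPI budget (CPSW_POLL_WEIGHT) across tx channels in
 * proportion to their configured rates; channels without a rate limit
 * share the remaining budget equally, and any leftover goes to the
 * channel with the biggest rate.
 */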
void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link
		 * speed, split proportionally according to the next
		 * potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

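		/* scaling twice covers a two-step speed drop,
		 * e.g. a 1000Mbps link renegotiated down to 10Mbps
		 */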
		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

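/* Assert a module's soft-reset bit and poll up to one second for the
 * hardware to clear it again.
 */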
static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		if (mac_control != slave->mac_control)
			cpsw_sl_ctl_set(slave->mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_wait_for_idle(slave->mac_sl, 100);

		cpsw_sl_ctl_reset(slave->mac_sl);
	}

	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	struct cpsw_common	*cpsw = priv->cpsw;
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(cpsw);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);

	/* setup priority mapping */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
			  RX_PRIORITY_MAPPING);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 to support full-duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 to support full-duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
			  cpsw->rx_packet_max);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	if (!IS_ERR(slave->data->ifphy))
		phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
				 slave->data->phy_if);
	else
		cpsw_phy_sel(cpsw->dev, slave->phy->interface,
			     slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}

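/* Pre-fill each rx channel with page-pool pages; every page carries its
 * cpsw_meta_xdp in the headroom and is submitted with the DMA address
 * the pool already mapped.
 */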
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct page_pool *pool;
	struct page *page;
	int ch_buf_num;
	int ch, i, ret;
	dma_addr_t dma;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		pool = cpsw->page_pool[ch];
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			page = page_pool_dev_alloc_pages(pool);
			if (!page) {
				cpsw_err(priv, ifup, "allocate rx page err\n");
				return -ENOMEM;
			}

			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
			xmeta->ndev = priv->ndev;
			xmeta->ch = ch;

			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
							    page, dma,
							    cpsw->rx_packet_max,
							    0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit page to channel %d rx, error %d\n",
					 ch, ret);
				page_pool_recycle_direct(pool, page);
				return ret;
			}
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);
}

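/* Map a traffic class to a tx FIFO: the last (lowest-priority) tc uses
 * FIFO0, which cannot be shaped; the remaining tcs map to the shaper
 * FIFOs in reverse order.
 */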
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for the highest fifos linearly,
	 * and fifo bw must be no more than the interface can allow
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}

/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* enable channels in backward order, as the highest FIFOs must be
	 * rate limited first and for compliance with CPDMA rate limited
	 * channels, which are also used in backward order. FIFO0 cannot be
	 * rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -1;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct cpsw_priv *priv = arg;

	if (!vdev)
		return 0;

	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
	return 0;
}

/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
	/* restore vlan configurations */
	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

	/* restore MQPRIO offload */
	for_each_slave(priv, cpsw_mqprio_resume, priv);

	/* restore CBS offload */
	for_each_slave(priv, cpsw_cbs_resume, priv);
}

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	netif_carrier_off(ndev);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}

	reg = cpsw->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* disable priority elevation */
		writel_relaxed(0, &cpsw->regs->ptype);

		/* enable statistics collection on all ports */
		writel_relaxed(0x7, &cpsw->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &cpsw->regs->flow_control);

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}

		/* create rxqs for both interfaces in dual mac mode, as they
		 * use the same pool and must be destroyed together when
		 * there are no more users.
		 */
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret < 0)
			goto err_cleanup;

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpts_register(cpsw->cpts))
			dev_err(priv->dev, "error registering cpts device\n");
	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	if (!cpsw->usage_count) {
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_destroy_xdp_rxqs(cpsw);
	}

	for_each_slave(priv, cpsw_slave_stop, cpsw);
	pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
	netif_tx_stop_all_queues(priv->ndev);
	netif_carrier_off(priv->ndev);

	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
		cpsw_destroy_xdp_rxqs(cpsw);
	}
	for_each_slave(priv, cpsw_slave_stop, cpsw);

	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	skb_tx_timestamp(skb);
	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
				priv->emac_port + cpsw->data.dual_emac);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there are no free tx descriptors left then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue is visible to other cpus */
		smp_mb__after_atomic();

		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue is visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}

#if IS_ENABLED(CONFIG_TI_CPTS)

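/* Program CPSW v1 time sync: timestamp PTP event messages (ETH_P_1588,
 * sequence id at offset 30) on the active slave in the enabled
 * directions.
 */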
1858 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
1859 {
1860 	struct cpsw_common *cpsw = priv->cpsw;
1861 	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
1862 	u32 ts_en, seq_id;
1863 
1864 	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
1865 		slave_write(slave, 0, CPSW1_TS_CTL);
1866 		return;
1867 	}
1868 
1869 	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
1870 	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
1871 
1872 	if (priv->tx_ts_enabled)
1873 		ts_en |= CPSW_V1_TS_TX_EN;
1874 
1875 	if (priv->rx_ts_enabled)
1876 		ts_en |= CPSW_V1_TS_RX_EN;
1877 
1878 	slave_write(slave, ts_en, CPSW1_TS_CTL);
1879 	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
1880 }
1881 
1882 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
1883 {
1884 	struct cpsw_slave *slave;
1885 	struct cpsw_common *cpsw = priv->cpsw;
1886 	u32 ctrl, mtype;
1887 
1888 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1889 
1890 	ctrl = slave_read(slave, CPSW2_CONTROL);
1891 	switch (cpsw->version) {
1892 	case CPSW_VERSION_2:
1893 		ctrl &= ~CTRL_V2_ALL_TS_MASK;
1894 
1895 		if (priv->tx_ts_enabled)
1896 			ctrl |= CTRL_V2_TX_TS_BITS;
1897 
1898 		if (priv->rx_ts_enabled)
1899 			ctrl |= CTRL_V2_RX_TS_BITS;
1900 		break;
1901 	case CPSW_VERSION_3:
1902 	default:
1903 		ctrl &= ~CTRL_V3_ALL_TS_MASK;
1904 
1905 		if (priv->tx_ts_enabled)
1906 			ctrl |= CTRL_V3_TX_TS_BITS;
1907 
1908 		if (priv->rx_ts_enabled)
1909 			ctrl |= CTRL_V3_RX_TS_BITS;
1910 		break;
1911 	}
1912 
1913 	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1914 
1915 	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
1916 	slave_write(slave, ctrl, CPSW2_CONTROL);
1917 	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
1918 	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
1919 }
1920 
1921 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1922 {
1923 	struct cpsw_priv *priv = netdev_priv(dev);
1924 	struct hwtstamp_config cfg;
1925 	struct cpsw_common *cpsw = priv->cpsw;
1926 
1927 	if (cpsw->version != CPSW_VERSION_1 &&
1928 	    cpsw->version != CPSW_VERSION_2 &&
1929 	    cpsw->version != CPSW_VERSION_3)
1930 		return -EOPNOTSUPP;
1931 
1932 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1933 		return -EFAULT;
1934 
1935 	/* reserved for future extensions */
1936 	if (cfg.flags)
1937 		return -EINVAL;
1938 
1939 	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
1940 		return -ERANGE;
1941 
1942 	switch (cfg.rx_filter) {
1943 	case HWTSTAMP_FILTER_NONE:
1944 		priv->rx_ts_enabled = 0;
1945 		break;
1946 	case HWTSTAMP_FILTER_ALL:
1947 	case HWTSTAMP_FILTER_NTP_ALL:
1948 		return -ERANGE;
1949 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1950 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1951 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1952 		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1953 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1954 		break;
1955 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1956 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1957 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1958 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1959 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1960 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1961 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1962 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1963 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1964 		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
1965 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1966 		break;
1967 	default:
1968 		return -ERANGE;
1969 	}
1970 
1971 	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
1972 
1973 	switch (cpsw->version) {
1974 	case CPSW_VERSION_1:
1975 		cpsw_hwtstamp_v1(priv);
1976 		break;
1977 	case CPSW_VERSION_2:
1978 	case CPSW_VERSION_3:
1979 		cpsw_hwtstamp_v2(priv);
1980 		break;
1981 	default:
1982 		WARN_ON(1);
1983 	}
1984 
1985 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1986 }
1987 
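/* Illustrative user-space counterpart of cpsw_hwtstamp_set() -- a minimal
 * sketch only (error handling omitted; "eth0" and fd are placeholders):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type   = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */
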
1988 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1989 {
1990 	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
1991 	struct cpsw_priv *priv = netdev_priv(dev);
1992 	struct hwtstamp_config cfg;
1993 
1994 	if (cpsw->version != CPSW_VERSION_1 &&
1995 	    cpsw->version != CPSW_VERSION_2 &&
1996 	    cpsw->version != CPSW_VERSION_3)
1997 		return -EOPNOTSUPP;
1998 
1999 	cfg.flags = 0;
2000 	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2001 	cfg.rx_filter = priv->rx_ts_enabled;
2002 
2003 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2004 }
2005 #else
2006 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2007 {
2008 	return -EOPNOTSUPP;
2009 }
2010 
2011 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2012 {
2013 	return -EOPNOTSUPP;
2014 }
2015 #endif /*CONFIG_TI_CPTS*/
2016 
2017 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2018 {
2019 	struct cpsw_priv *priv = netdev_priv(dev);
2020 	struct cpsw_common *cpsw = priv->cpsw;
2021 	int slave_no = cpsw_slave_index(cpsw, priv);
2022 
2023 	if (!netif_running(dev))
2024 		return -EINVAL;
2025 
2026 	switch (cmd) {
2027 	case SIOCSHWTSTAMP:
2028 		return cpsw_hwtstamp_set(dev, req);
2029 	case SIOCGHWTSTAMP:
2030 		return cpsw_hwtstamp_get(dev, req);
2031 	}
2032 
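	/* Everything else (SIOCGMIIPHY, SIOCGMIIREG, ...) is delegated to
	 * the attached PHY, if there is one.
	 */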
2033 	if (!cpsw->slaves[slave_no].phy)
2034 		return -EOPNOTSUPP;
2035 	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
2036 }
2037 
2038 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
2039 {
2040 	struct cpsw_priv *priv = netdev_priv(ndev);
2041 	struct cpsw_common *cpsw = priv->cpsw;
2042 	int ch;
2043 
2044 	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
2045 	ndev->stats.tx_errors++;
2046 	cpsw_intr_disable(cpsw);
2047 	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
2048 		cpdma_chan_stop(cpsw->txv[ch].ch);
2049 		cpdma_chan_start(cpsw->txv[ch].ch);
2050 	}
2051 
2052 	cpsw_intr_enable(cpsw);
2053 	netif_trans_update(ndev);
2054 	netif_tx_wake_all_queues(ndev);
2055 }
2056 
2057 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
2058 {
2059 	struct cpsw_priv *priv = netdev_priv(ndev);
2060 	struct sockaddr *addr = (struct sockaddr *)p;
2061 	struct cpsw_common *cpsw = priv->cpsw;
2062 	int flags = 0;
2063 	u16 vid = 0;
2064 	int ret;
2065 
2066 	if (!is_valid_ether_addr(addr->sa_data))
2067 		return -EADDRNOTAVAIL;
2068 
2069 	ret = pm_runtime_get_sync(cpsw->dev);
2070 	if (ret < 0) {
2071 		pm_runtime_put_noidle(cpsw->dev);
2072 		return ret;
2073 	}
2074 
2075 	if (cpsw->data.dual_emac) {
2076 		vid = cpsw->slaves[priv->emac_port].port_vlan;
2077 		flags = ALE_VLAN;
2078 	}
2079 
2080 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
2081 			   flags, vid);
2082 	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
2083 			   flags, vid);
2084 
2085 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
2086 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2087 	for_each_slave(priv, cpsw_set_slave_mac, priv);
2088 
2089 	pm_runtime_put(cpsw->dev);
2090 
2091 	return 0;
2092 }
2093 
2094 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
2095 				unsigned short vid)
2096 {
2097 	int ret;
2098 	int unreg_mcast_mask = 0;
2099 	int mcast_mask;
2100 	u32 port_mask;
2101 	struct cpsw_common *cpsw = priv->cpsw;
2102 
2103 	if (cpsw->data.dual_emac) {
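		/* Host is ALE port 0 and slave N is ALE port N + 1,
		 * hence the "+ 1" in port_mask below.
		 */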
2104 		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
2105 
2106 		mcast_mask = ALE_PORT_HOST;
2107 		if (priv->ndev->flags & IFF_ALLMULTI)
2108 			unreg_mcast_mask = mcast_mask;
2109 	} else {
2110 		port_mask = ALE_ALL_PORTS;
2111 		mcast_mask = port_mask;
2112 
2113 		if (priv->ndev->flags & IFF_ALLMULTI)
2114 			unreg_mcast_mask = ALE_ALL_PORTS;
2115 		else
2116 			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
2117 	}
2118 
2119 	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
2120 				unreg_mcast_mask);
2121 	if (ret != 0)
2122 		return ret;
2123 
2124 	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
2125 				 HOST_PORT_NUM, ALE_VLAN, vid);
2126 	if (ret != 0)
2127 		goto clean_vid;
2128 
2129 	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
2130 				 mcast_mask, ALE_VLAN, vid, 0);
2131 	if (ret != 0)
2132 		goto clean_vlan_ucast;
2133 	return 0;
2134 
2135 clean_vlan_ucast:
2136 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2137 			   HOST_PORT_NUM, ALE_VLAN, vid);
2138 clean_vid:
2139 	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2140 	return ret;
2141 }
2142 
2143 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
2144 				    __be16 proto, u16 vid)
2145 {
2146 	struct cpsw_priv *priv = netdev_priv(ndev);
2147 	struct cpsw_common *cpsw = priv->cpsw;
2148 	int ret;
2149 
2150 	if (vid == cpsw->data.default_vlan)
2151 		return 0;
2152 
2153 	ret = pm_runtime_get_sync(cpsw->dev);
2154 	if (ret < 0) {
2155 		pm_runtime_put_noidle(cpsw->dev);
2156 		return ret;
2157 	}
2158 
2159 	if (cpsw->data.dual_emac) {
2160 		/* In dual EMAC mode, a reserved VLAN ID must not be used
2161 		 * for creating VLAN interfaces, as this can break the
2162 		 * dual EMAC port separation.
2163 		 */
2164 		int i;
2165 
2166 		for (i = 0; i < cpsw->data.slaves; i++) {
2167 			if (vid == cpsw->slaves[i].port_vlan) {
2168 				ret = -EINVAL;
2169 				goto err;
2170 			}
2171 		}
2172 	}
2173 
2174 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
2175 	ret = cpsw_add_vlan_ale_entry(priv, vid);
2176 err:
2177 	pm_runtime_put(cpsw->dev);
2178 	return ret;
2179 }
2180 
2181 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
2182 				     __be16 proto, u16 vid)
2183 {
2184 	struct cpsw_priv *priv = netdev_priv(ndev);
2185 	struct cpsw_common *cpsw = priv->cpsw;
2186 	int ret;
2187 
2188 	if (vid == cpsw->data.default_vlan)
2189 		return 0;
2190 
2191 	ret = pm_runtime_get_sync(cpsw->dev);
2192 	if (ret < 0) {
2193 		pm_runtime_put_noidle(cpsw->dev);
2194 		return ret;
2195 	}
2196 
2197 	if (cpsw->data.dual_emac) {
2198 		int i;
2199 
2200 		for (i = 0; i < cpsw->data.slaves; i++) {
2201 			if (vid == cpsw->slaves[i].port_vlan)
2202 				goto err;
2203 		}
2204 	}
2205 
2206 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
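	/* The four ALE removals below are independent; OR-ing their return
	 * values reports that at least one of them failed, though the
	 * individual error code is lost.
	 */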
2207 	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2208 	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2209 				  HOST_PORT_NUM, ALE_VLAN, vid);
2210 	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2211 				  0, ALE_VLAN, vid);
2212 	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
2213 err:
2214 	pm_runtime_put(cpsw->dev);
2215 	return ret;
2216 }
2217 
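/* TX rate limiting is driven from user space through the per-queue sysfs
 * attribute, e.g. (illustrative, value in Mb/s):
 *
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */
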
2218 static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
2219 {
2220 	struct cpsw_priv *priv = netdev_priv(ndev);
2221 	struct cpsw_common *cpsw = priv->cpsw;
2222 	struct cpsw_slave *slave;
2223 	u32 min_rate;
2224 	u32 ch_rate;
2225 	int i, ret;
2226 
2227 	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
2228 	if (ch_rate == rate)
2229 		return 0;
2230 
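	/* The ndo passes the rate in Mb/s; cpdma channel rates appear to
	 * be kept in Kb/s, hence the scaling.
	 */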
2231 	ch_rate = rate * 1000;
2232 	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
2233 	if (ch_rate < min_rate && ch_rate) {
2234 		dev_err(priv->dev, "The channel rate cannot be less than %dKbps",
2235 			min_rate);
2236 		return -EINVAL;
2237 	}
2238 
2239 	if (rate > cpsw->speed) {
2240 		dev_err(priv->dev, "The channel rate cannot be more than the current link speed");
2241 		return -EINVAL;
2242 	}
2243 
2244 	ret = pm_runtime_get_sync(cpsw->dev);
2245 	if (ret < 0) {
2246 		pm_runtime_put_noidle(cpsw->dev);
2247 		return ret;
2248 	}
2249 
2250 	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
2251 	pm_runtime_put(cpsw->dev);
2252 
2253 	if (ret)
2254 		return ret;
2255 
2256 	/* update rates for the slaves' TX queues */
2257 	for (i = 0; i < cpsw->data.slaves; i++) {
2258 		slave = &cpsw->slaves[i];
2259 		if (!slave->ndev)
2260 			continue;
2261 
2262 		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
2263 	}
2264 
2265 	cpsw_split_res(cpsw);
2266 	return ret;
2267 }
2268 
2269 static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
2270 {
2271 	struct tc_mqprio_qopt_offload *mqprio = type_data;
2272 	struct cpsw_priv *priv = netdev_priv(ndev);
2273 	struct cpsw_common *cpsw = priv->cpsw;
2274 	int fifo, num_tc, count, offset;
2275 	struct cpsw_slave *slave;
2276 	u32 tx_prio_map = 0;
2277 	int i, tc, ret;
2278 
2279 	num_tc = mqprio->qopt.num_tc;
2280 	if (num_tc > CPSW_TC_NUM)
2281 		return -EINVAL;
2282 
2283 	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
2284 		return -EINVAL;
2285 
2286 	ret = pm_runtime_get_sync(cpsw->dev);
2287 	if (ret < 0) {
2288 		pm_runtime_put_noidle(cpsw->dev);
2289 		return ret;
2290 	}
2291 
2292 	if (num_tc) {
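		/* TX_PRI_MAP holds a 4-bit FIFO number for each of the
		 * eight packet priorities.
		 */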
2293 		for (i = 0; i < 8; i++) {
2294 			tc = mqprio->qopt.prio_tc_map[i];
2295 			fifo = cpsw_tc_to_fifo(tc, num_tc);
2296 			tx_prio_map |= fifo << (4 * i);
2297 		}
2298 
2299 		netdev_set_num_tc(ndev, num_tc);
2300 		for (i = 0; i < num_tc; i++) {
2301 			count = mqprio->qopt.count[i];
2302 			offset = mqprio->qopt.offset[i];
2303 			netdev_set_tc_queue(ndev, i, count, offset);
2304 		}
2305 	}
2306 
2307 	if (!mqprio->qopt.hw) {
2308 		/* restore default configuration */
2309 		netdev_reset_tc(ndev);
2310 		tx_prio_map = TX_PRIORITY_MAPPING;
2311 	}
2312 
2313 	priv->mqprio_hw = mqprio->qopt.hw;
2314 
2315 	offset = cpsw->version == CPSW_VERSION_1 ?
2316 		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
2317 
2318 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2319 	slave_write(slave, tx_prio_map, offset);
2320 
2321 	pm_runtime_put_sync(cpsw->dev);
2322 
2323 	return 0;
2324 }
2325 
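/* Illustrative mqprio setup from user space (hypothetical interface name;
 * two traffic classes mapped onto two queues, offloaded in DCB mode):
 *
 *	tc qdisc replace dev eth0 root mqprio num_tc 2 \
 *		map 0 0 1 1 0 0 0 0 queues 1@0 1@1 hw 1
 */
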
2326 static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2327 			     void *type_data)
2328 {
2329 	switch (type) {
2330 	case TC_SETUP_QDISC_CBS:
2331 		return cpsw_set_cbs(ndev, type_data);
2332 
2333 	case TC_SETUP_QDISC_MQPRIO:
2334 		return cpsw_set_mqprio(ndev, type_data);
2335 
2336 	default:
2337 		return -EOPNOTSUPP;
2338 	}
2339 }
2340 
2341 static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
2342 {
2343 	struct bpf_prog *prog = bpf->prog;
2344 
2345 	if (!priv->xdpi.prog && !prog)
2346 		return 0;
2347 
2348 	if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
2349 		return -EBUSY;
2350 
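	/* The RX path is expected to read this pointer with READ_ONCE(),
	 * so a plain WRITE_ONCE() is sufficient here.
	 */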
2351 	WRITE_ONCE(priv->xdp_prog, prog);
2352 
2353 	xdp_attachment_setup(&priv->xdpi, bpf);
2354 
2355 	return 0;
2356 }
2357 
2358 static int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
2359 {
2360 	struct cpsw_priv *priv = netdev_priv(ndev);
2361 
2362 	switch (bpf->command) {
2363 	case XDP_SETUP_PROG:
2364 		return cpsw_xdp_prog_setup(priv, bpf);
2365 
2366 	case XDP_QUERY_PROG:
2367 		return xdp_attachment_query(&priv->xdpi, bpf);
2368 
2369 	default:
2370 		return -EINVAL;
2371 	}
2372 }
2373 
2374 static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
2375 			     struct xdp_frame **frames, u32 flags)
2376 {
2377 	struct cpsw_priv *priv = netdev_priv(ndev);
2378 	struct xdp_frame *xdpf;
2379 	int i, drops = 0;
2380 
2381 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2382 		return -EINVAL;
2383 
2384 	for (i = 0; i < n; i++) {
2385 		xdpf = frames[i];
2386 		if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
2387 			xdp_return_frame_rx_napi(xdpf);
2388 			drops++;
2389 			continue;
2390 		}
2391 
2392 		if (cpsw_xdp_tx_frame(priv, xdpf, NULL))
2393 			drops++;
2394 	}
2395 
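	/* Per the ndo_xdp_xmit() contract, return the number of frames
	 * actually queued for transmission.
	 */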
2396 	return n - drops;
2397 }
2398 
2399 #ifdef CONFIG_NET_POLL_CONTROLLER
2400 static void cpsw_ndo_poll_controller(struct net_device *ndev)
2401 {
2402 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2403 
2404 	cpsw_intr_disable(cpsw);
2405 	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2406 	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
2407 	cpsw_intr_enable(cpsw);
2408 }
2409 #endif
2410 
2411 static const struct net_device_ops cpsw_netdev_ops = {
2412 	.ndo_open		= cpsw_ndo_open,
2413 	.ndo_stop		= cpsw_ndo_stop,
2414 	.ndo_start_xmit		= cpsw_ndo_start_xmit,
2415 	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
2416 	.ndo_do_ioctl		= cpsw_ndo_ioctl,
2417 	.ndo_validate_addr	= eth_validate_addr,
2418 	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
2419 	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
2420 	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
2421 #ifdef CONFIG_NET_POLL_CONTROLLER
2422 	.ndo_poll_controller	= cpsw_ndo_poll_controller,
2423 #endif
2424 	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
2425 	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
2426 	.ndo_setup_tc           = cpsw_ndo_setup_tc,
2427 	.ndo_bpf		= cpsw_ndo_bpf,
2428 	.ndo_xdp_xmit		= cpsw_ndo_xdp_xmit,
2429 };
2430 
2431 static void cpsw_get_drvinfo(struct net_device *ndev,
2432 			     struct ethtool_drvinfo *info)
2433 {
2434 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2435 	struct platform_device	*pdev = to_platform_device(cpsw->dev);
2436 
2437 	strlcpy(info->driver, "cpsw", sizeof(info->driver));
2438 	strlcpy(info->version, "1.0", sizeof(info->version));
2439 	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
2440 }
2441 
2442 static int cpsw_set_pauseparam(struct net_device *ndev,
2443 			       struct ethtool_pauseparam *pause)
2444 {
2445 	struct cpsw_priv *priv = netdev_priv(ndev);
2446 	bool link;
2447 
2448 	priv->rx_pause = pause->rx_pause ? true : false;
2449 	priv->tx_pause = pause->tx_pause ? true : false;
2450 
2451 	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
2452 	return 0;
2453 }
2454 
2455 static int cpsw_set_channels(struct net_device *ndev,
2456 			     struct ethtool_channels *chs)
2457 {
2458 	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
2459 }
2460 
2461 static const struct ethtool_ops cpsw_ethtool_ops = {
2462 	.get_drvinfo	= cpsw_get_drvinfo,
2463 	.get_msglevel	= cpsw_get_msglevel,
2464 	.set_msglevel	= cpsw_set_msglevel,
2465 	.get_link	= ethtool_op_get_link,
2466 	.get_ts_info	= cpsw_get_ts_info,
2467 	.get_coalesce	= cpsw_get_coalesce,
2468 	.set_coalesce	= cpsw_set_coalesce,
2469 	.get_sset_count		= cpsw_get_sset_count,
2470 	.get_strings		= cpsw_get_strings,
2471 	.get_ethtool_stats	= cpsw_get_ethtool_stats,
2472 	.get_pauseparam		= cpsw_get_pauseparam,
2473 	.set_pauseparam		= cpsw_set_pauseparam,
2474 	.get_wol	= cpsw_get_wol,
2475 	.set_wol	= cpsw_set_wol,
2476 	.get_regs_len	= cpsw_get_regs_len,
2477 	.get_regs	= cpsw_get_regs,
2478 	.begin		= cpsw_ethtool_op_begin,
2479 	.complete	= cpsw_ethtool_op_complete,
2480 	.get_channels	= cpsw_get_channels,
2481 	.set_channels	= cpsw_set_channels,
2482 	.get_link_ksettings	= cpsw_get_link_ksettings,
2483 	.set_link_ksettings	= cpsw_set_link_ksettings,
2484 	.get_eee	= cpsw_get_eee,
2485 	.set_eee	= cpsw_set_eee,
2486 	.nway_reset	= cpsw_nway_reset,
2487 	.get_ringparam = cpsw_get_ringparam,
2488 	.set_ringparam = cpsw_set_ringparam,
2489 };
2490 
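/* A minimal, illustrative device-tree fragment with the properties parsed
 * below (values are placeholders, not a reference configuration):
 *
 *	mac: ethernet@4a100000 {
 *		compatible = "ti,cpsw";
 *		slaves = <2>;
 *		active_slave = <0>;
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *
 *		cpsw_emac0: slave@200 {
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii-txid";
 *		};
 *	};
 */
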
2491 static int cpsw_probe_dt(struct cpsw_platform_data *data,
2492 			 struct platform_device *pdev)
2493 {
2494 	struct device_node *node = pdev->dev.of_node;
2495 	struct device_node *slave_node;
2496 	int i = 0, ret;
2497 	u32 prop;
2498 
2499 	if (!node)
2500 		return -EINVAL;
2501 
2502 	if (of_property_read_u32(node, "slaves", &prop)) {
2503 		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
2504 		return -EINVAL;
2505 	}
2506 	data->slaves = prop;
2507 
2508 	if (of_property_read_u32(node, "active_slave", &prop)) {
2509 		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
2510 		return -EINVAL;
2511 	}
2512 	data->active_slave = prop;
2513 
2514 	data->slave_data = devm_kcalloc(&pdev->dev,
2515 					data->slaves,
2516 					sizeof(struct cpsw_slave_data),
2517 					GFP_KERNEL);
2518 	if (!data->slave_data)
2519 		return -ENOMEM;
2520 
2521 	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
2522 		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
2523 		return -EINVAL;
2524 	}
2525 	data->channels = prop;
2526 
2527 	if (of_property_read_u32(node, "ale_entries", &prop)) {
2528 		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
2529 		return -EINVAL;
2530 	}
2531 	data->ale_entries = prop;
2532 
2533 	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
2534 		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
2535 		return -EINVAL;
2536 	}
2537 	data->bd_ram_size = prop;
2538 
2539 	if (of_property_read_u32(node, "mac_control", &prop)) {
2540 		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
2541 		return -EINVAL;
2542 	}
2543 	data->mac_control = prop;
2544 
2545 	if (of_property_read_bool(node, "dual_emac"))
2546 		data->dual_emac = 1;
2547 
2548 	/*
2549 	 * Populate all the child nodes here...
2550 	 */
2551 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2552 	/* We do not want to force this, as some nodes may not have children */
2553 	if (ret)
2554 		dev_warn(&pdev->dev, "Doesn't have any child node\n");
2555 
2556 	for_each_available_child_of_node(node, slave_node) {
2557 		struct cpsw_slave_data *slave_data = data->slave_data + i;
2558 		const void *mac_addr = NULL;
2559 		int lenp;
2560 		const __be32 *parp;
2561 
2562 		/* This is not a slave child node; continue */
2563 		if (!of_node_name_eq(slave_node, "slave"))
2564 			continue;
2565 
2566 		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
2567 						    NULL);
2568 		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
2569 		    IS_ERR(slave_data->ifphy)) {
2570 			ret = PTR_ERR(slave_data->ifphy);
2571 			dev_err(&pdev->dev,
2572 				"%d: Error retrieving port phy: %d\n", i, ret);
2573 			return ret;
2574 		}
2575 
2576 		slave_data->slave_node = slave_node;
2577 		slave_data->phy_node = of_parse_phandle(slave_node,
2578 							"phy-handle", 0);
2579 		parp = of_get_property(slave_node, "phy_id", &lenp);
2580 		if (slave_data->phy_node) {
2581 			dev_dbg(&pdev->dev,
2582 				"slave[%d] using phy-handle=\"%pOF\"\n",
2583 				i, slave_data->phy_node);
2584 		} else if (of_phy_is_fixed_link(slave_node)) {
2585 			/* In the case of a fixed PHY, the DT node associated
2586 			 * with the PHY is the Ethernet MAC DT node.
2587 			 */
2588 			ret = of_phy_register_fixed_link(slave_node);
2589 			if (ret) {
2590 				if (ret != -EPROBE_DEFER)
2591 					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
2592 				return ret;
2593 			}
2594 			slave_data->phy_node = of_node_get(slave_node);
2595 		} else if (parp) {
2596 			u32 phyid;
2597 			struct device_node *mdio_node;
2598 			struct platform_device *mdio;
2599 
2600 			if (lenp != (sizeof(__be32) * 2)) {
2601 				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
2602 				goto no_phy_slave;
2603 			}
2604 			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2605 			phyid = be32_to_cpup(parp+1);
2606 			mdio = of_find_device_by_node(mdio_node);
2607 			of_node_put(mdio_node);
2608 			if (!mdio) {
2609 				dev_err(&pdev->dev, "Missing mdio platform device\n");
2610 				return -EINVAL;
2611 			}
2612 			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2613 				 PHY_ID_FMT, mdio->name, phyid);
2614 			put_device(&mdio->dev);
2615 		} else {
2616 			dev_err(&pdev->dev,
2617 				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
2618 				i);
2619 			goto no_phy_slave;
2620 		}
2621 		slave_data->phy_if = of_get_phy_mode(slave_node);
2622 		if (slave_data->phy_if < 0) {
2623 			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2624 				i);
2625 			return slave_data->phy_if;
2626 		}
2627 
2628 no_phy_slave:
2629 		mac_addr = of_get_mac_address(slave_node);
2630 		if (!IS_ERR(mac_addr)) {
2631 			ether_addr_copy(slave_data->mac_addr, mac_addr);
2632 		} else {
2633 			ret = ti_cm_get_macid(&pdev->dev, i,
2634 					      slave_data->mac_addr);
2635 			if (ret)
2636 				return ret;
2637 		}
2638 		if (data->dual_emac) {
2639 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2640 						 &prop)) {
2641 				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
2642 				slave_data->dual_emac_res_vlan = i+1;
2643 				dev_err(&pdev->dev, "Using %d as reserved VLAN for slave %d\n",
2644 					slave_data->dual_emac_res_vlan, i);
2645 			} else {
2646 				slave_data->dual_emac_res_vlan = prop;
2647 			}
2648 		}
2649 
2650 		i++;
2651 		if (i == data->slaves)
2652 			break;
2653 	}
2654 
2655 	return 0;
2656 }
2657 
2658 static void cpsw_remove_dt(struct platform_device *pdev)
2659 {
2660 	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2661 	struct cpsw_platform_data *data = &cpsw->data;
2662 	struct device_node *node = pdev->dev.of_node;
2663 	struct device_node *slave_node;
2664 	int i = 0;
2665 
2666 	for_each_available_child_of_node(node, slave_node) {
2667 		struct cpsw_slave_data *slave_data = &data->slave_data[i];
2668 
2669 		if (!of_node_name_eq(slave_node, "slave"))
2670 			continue;
2671 
2672 		if (of_phy_is_fixed_link(slave_node))
2673 			of_phy_deregister_fixed_link(slave_node);
2674 
2675 		of_node_put(slave_data->phy_node);
2676 
2677 		i++;
2678 		if (i == data->slaves)
2679 			break;
2680 	}
2681 
2682 	of_platform_depopulate(&pdev->dev);
2683 }
2684 
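/* In dual_emac mode the second slave port gets its own net_device, but it
 * shares the cpsw_common state (DMA channels, ALE, interrupts) with the
 * first one.
 */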
2685 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2686 {
2687 	struct cpsw_common		*cpsw = priv->cpsw;
2688 	struct cpsw_platform_data	*data = &cpsw->data;
2689 	struct net_device		*ndev;
2690 	struct cpsw_priv		*priv_sl2;
2691 	int ret = 0;
2692 
2693 	ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
2694 				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
2695 	if (!ndev) {
2696 		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
2697 		return -ENOMEM;
2698 	}
2699 
2700 	priv_sl2 = netdev_priv(ndev);
2701 	priv_sl2->cpsw = cpsw;
2702 	priv_sl2->ndev = ndev;
2703 	priv_sl2->dev  = &ndev->dev;
2704 	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2705 
2706 	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
2707 		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
2708 			ETH_ALEN);
2709 		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
2710 			 priv_sl2->mac_addr);
2711 	} else {
2712 		eth_random_addr(priv_sl2->mac_addr);
2713 		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
2714 			 priv_sl2->mac_addr);
2715 	}
2716 	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
2717 
2718 	priv_sl2->emac_port = 1;
2719 	cpsw->slaves[1].ndev = ndev;
2720 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2721 
2722 	ndev->netdev_ops = &cpsw_netdev_ops;
2723 	ndev->ethtool_ops = &cpsw_ethtool_ops;
2724 
2725 	/* register the network device */
2726 	SET_NETDEV_DEV(ndev, cpsw->dev);
2727 	ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
2728 	ret = register_netdev(ndev);
2729 	if (ret)
2730 		dev_err(cpsw->dev, "cpsw: error registering net device\n");
2731 
2732 	return ret;
2733 }
2734 
2735 static const struct of_device_id cpsw_of_mtable[] = {
2736 	{ .compatible = "ti,cpsw"},
2737 	{ .compatible = "ti,am335x-cpsw"},
2738 	{ .compatible = "ti,am4372-cpsw"},
2739 	{ .compatible = "ti,dra7-cpsw"},
2740 	{ /* sentinel */ },
2741 };
2742 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
2743 
2744 static const struct soc_device_attribute cpsw_soc_devices[] = {
2745 	{ .family = "AM33xx", .revision = "ES1.0"},
2746 	{ /* sentinel */ }
2747 };
2748 
2749 static int cpsw_probe(struct platform_device *pdev)
2750 {
2751 	struct device			*dev = &pdev->dev;
2752 	struct clk			*clk;
2753 	struct cpsw_platform_data	*data;
2754 	struct net_device		*ndev;
2755 	struct cpsw_priv		*priv;
2756 	void __iomem			*ss_regs;
2757 	struct resource			*res, *ss_res;
2758 	struct gpio_descs		*mode;
2759 	const struct soc_device_attribute *soc;
2760 	struct cpsw_common		*cpsw;
2761 	int ret = 0, ch;
2762 	int irq;
2763 
2764 	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
2765 	if (!cpsw)
2766 		return -ENOMEM;
2767 
2768 	cpsw->dev = dev;
2769 
2770 	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
2771 	if (IS_ERR(mode)) {
2772 		ret = PTR_ERR(mode);
2773 		dev_err(dev, "gpio request failed, ret %d\n", ret);
2774 		return ret;
2775 	}
2776 
2777 	clk = devm_clk_get(dev, "fck");
2778 	if (IS_ERR(clk)) {
2779 		ret = PTR_ERR(clk);
2780 		dev_err(dev, "fck is not found %d\n", ret);
2781 		return ret;
2782 	}
2783 	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2784 
2785 	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2786 	ss_regs = devm_ioremap_resource(dev, ss_res);
2787 	if (IS_ERR(ss_regs))
2788 		return PTR_ERR(ss_regs);
2789 	cpsw->regs = ss_regs;
2790 
2791 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2792 	cpsw->wr_regs = devm_ioremap_resource(dev, res);
2793 	if (IS_ERR(cpsw->wr_regs))
2794 		return PTR_ERR(cpsw->wr_regs);
2795 
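	/* Per the DT binding the IRQ resources are ordered RX_THRESH (0),
	 * RX (1), TX (2), MISC (3); only RX and TX are requested here.
	 */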
2796 	/* RX IRQ */
2797 	irq = platform_get_irq(pdev, 1);
2798 	if (irq < 0)
2799 		return irq;
2800 	cpsw->irqs_table[0] = irq;
2801 
2802 	/* TX IRQ */
2803 	irq = platform_get_irq(pdev, 2);
2804 	if (irq < 0)
2805 		return irq;
2806 	cpsw->irqs_table[1] = irq;
2807 
2808 	/*
2809 	 * This may be required here for child devices (e.g. the MDIO bus).
2810 	 */
2811 	pm_runtime_enable(dev);
2812 
2813 	/* Need to enable clocks with the runtime PM API to access module
2814 	 * registers.
2815 	 */
2816 	ret = pm_runtime_get_sync(dev);
2817 	if (ret < 0) {
2818 		pm_runtime_put_noidle(dev);
2819 		goto clean_runtime_disable_ret;
2820 	}
2821 
2822 	ret = cpsw_probe_dt(&cpsw->data, pdev);
2823 	if (ret)
2824 		goto clean_dt_ret;
2825 
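	/* On the SoCs listed in cpsw_soc_devices the driver falls back to
	 * quirk_irq mode: a single TX channel and the non-mq NAPI pollers
	 * set up below.
	 */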
2826 	soc = soc_device_match(cpsw_soc_devices);
2827 	if (soc)
2828 		cpsw->quirk_irq = 1;
2829 
2830 	data = &cpsw->data;
2831 	cpsw->slaves = devm_kcalloc(dev,
2832 				    data->slaves, sizeof(struct cpsw_slave),
2833 				    GFP_KERNEL);
2834 	if (!cpsw->slaves) {
2835 		ret = -ENOMEM;
2836 		goto clean_dt_ret;
2837 	}
2838 
2839 	cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
2840 	cpsw->descs_pool_size = descs_pool_size;
2841 
2842 	ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
2843 			       ss_res->start + CPSW2_BD_OFFSET,
2844 			       descs_pool_size);
2845 	if (ret)
2846 		goto clean_dt_ret;
2847 
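	/* Default mode creates the first TX queue on cpdma channel 7;
	 * quirk_irq mode sticks to channel 0.
	 */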
2848 	ch = cpsw->quirk_irq ? 0 : 7;
2849 	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
2850 	if (IS_ERR(cpsw->txv[0].ch)) {
2851 		dev_err(dev, "error initializing tx dma channel\n");
2852 		ret = PTR_ERR(cpsw->txv[0].ch);
2853 		goto clean_cpts;
2854 	}
2855 
2856 	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
2857 	if (IS_ERR(cpsw->rxv[0].ch)) {
2858 		dev_err(dev, "error initializing rx dma channel\n");
2859 		ret = PTR_ERR(cpsw->rxv[0].ch);
2860 		goto clean_cpts;
2861 	}
2862 	cpsw_split_res(cpsw);
2863 
2864 	/* setup netdev */
2865 	ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
2866 				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
2867 	if (!ndev) {
2868 		dev_err(dev, "error allocating net_device\n");
		ret = -ENOMEM;
2869 		goto clean_cpts;
2870 	}
2871 
2872 	platform_set_drvdata(pdev, cpsw);
2873 	priv = netdev_priv(ndev);
2874 	priv->cpsw = cpsw;
2875 	priv->ndev = ndev;
2876 	priv->dev  = dev;
2877 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2878 	priv->emac_port = 0;
2879 
2880 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
2881 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2882 		dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
2883 	} else {
2884 		eth_random_addr(priv->mac_addr);
2885 		dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
2886 	}
2887 
2888 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2889 
2890 	cpsw->slaves[0].ndev = ndev;
2891 
2892 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2893 
2894 	ndev->netdev_ops = &cpsw_netdev_ops;
2895 	ndev->ethtool_ops = &cpsw_ethtool_ops;
2896 	netif_napi_add(ndev, &cpsw->napi_rx,
2897 		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
2898 		       CPSW_POLL_WEIGHT);
2899 	netif_tx_napi_add(ndev, &cpsw->napi_tx,
2900 			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
2901 			  CPSW_POLL_WEIGHT);
2902 
2903 	/* register the network device */
2904 	SET_NETDEV_DEV(ndev, dev);
2905 	ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
2906 	ret = register_netdev(ndev);
2907 	if (ret) {
2908 		dev_err(dev, "error registering net device\n");
2909 		ret = -ENODEV;
2910 		goto clean_cpts;
2911 	}
2912 
2913 	if (cpsw->data.dual_emac) {
2914 		ret = cpsw_probe_dual_emac(priv);
2915 		if (ret) {
2916 			cpsw_err(priv, probe, "error probing slave 2 emac interface\n");
2917 			goto clean_unregister_netdev_ret;
2918 		}
2919 	}
2920 
2921 	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
2922 	 * MISC IRQs which are always kept disabled with this driver so
2923 	 * we will not request them.
2924 	 *
2925 	 * If anyone wants to implement support for those, make sure to
2926 	 * first request and append them to irqs_table array.
2927 	 */
2928 	ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
2929 			       0, dev_name(dev), cpsw);
2930 	if (ret < 0) {
2931 		dev_err(dev, "error attaching irq (%d)\n", ret);
2932 		goto clean_unregister_netdev_ret;
2933 	}
2934 
2935 
2936 	ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
2937 			       0, dev_name(&pdev->dev), cpsw);
2938 	if (ret < 0) {
2939 		dev_err(dev, "error attaching irq (%d)\n", ret);
2940 		goto clean_unregister_netdev_ret;
2941 	}
2942 
2943 	cpsw_notice(priv, probe,
2944 		    "initialized device (regs %pa, irq %d, pool size %d)\n",
2945 		    &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
2946 
2947 	pm_runtime_put(&pdev->dev);
2948 
2949 	return 0;
2950 
2951 clean_unregister_netdev_ret:
2952 	unregister_netdev(ndev);
2953 clean_cpts:
2954 	cpts_release(cpsw->cpts);
2955 	cpdma_ctlr_destroy(cpsw->dma);
2956 clean_dt_ret:
2957 	cpsw_remove_dt(pdev);
2958 	pm_runtime_put_sync(&pdev->dev);
2959 clean_runtime_disable_ret:
2960 	pm_runtime_disable(&pdev->dev);
2961 	return ret;
2962 }
2963 
2964 static int cpsw_remove(struct platform_device *pdev)
2965 {
2966 	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2967 	int i, ret;
2968 
2969 	ret = pm_runtime_get_sync(&pdev->dev);
2970 	if (ret < 0) {
2971 		pm_runtime_put_noidle(&pdev->dev);
2972 		return ret;
2973 	}
2974 
2975 	for (i = 0; i < cpsw->data.slaves; i++)
2976 		if (cpsw->slaves[i].ndev)
2977 			unregister_netdev(cpsw->slaves[i].ndev);
2978 
2979 	cpts_release(cpsw->cpts);
2980 	cpdma_ctlr_destroy(cpsw->dma);
2981 	cpsw_remove_dt(pdev);
2982 	pm_runtime_put_sync(&pdev->dev);
2983 	pm_runtime_disable(&pdev->dev);
2984 	return 0;
2985 }
2986 
2987 #ifdef CONFIG_PM_SLEEP
2988 static int cpsw_suspend(struct device *dev)
2989 {
2990 	struct cpsw_common *cpsw = dev_get_drvdata(dev);
2991 	int i;
2992 
2993 	for (i = 0; i < cpsw->data.slaves; i++)
2994 		if (cpsw->slaves[i].ndev)
2995 			if (netif_running(cpsw->slaves[i].ndev))
2996 				cpsw_ndo_stop(cpsw->slaves[i].ndev);
2997 
2998 	/* Select sleep pin state */
2999 	pinctrl_pm_select_sleep_state(dev);
3000 
3001 	return 0;
3002 }
3003 
3004 static int cpsw_resume(struct device *dev)
3005 {
3006 	struct cpsw_common *cpsw = dev_get_drvdata(dev);
3007 	int i;
3008 
3009 	/* Select default pin state */
3010 	pinctrl_pm_select_default_state(dev);
3011 
3012 	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
3013 	rtnl_lock();
3014 
3015 	for (i = 0; i < cpsw->data.slaves; i++)
3016 		if (cpsw->slaves[i].ndev)
3017 			if (netif_running(cpsw->slaves[i].ndev))
3018 				cpsw_ndo_open(cpsw->slaves[i].ndev);
3019 
3020 	rtnl_unlock();
3021 
3022 	return 0;
3023 }
3024 #endif
3025 
3026 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
3027 
3028 static struct platform_driver cpsw_driver = {
3029 	.driver = {
3030 		.name	 = "cpsw",
3031 		.pm	 = &cpsw_pm_ops,
3032 		.of_match_table = cpsw_of_mtable,
3033 	},
3034 	.probe = cpsw_probe,
3035 	.remove = cpsw_remove,
3036 };
3037 
3038 module_platform_driver(cpsw_driver);
3039 
3040 MODULE_LICENSE("GPL");
3041 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
3042 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
3043 MODULE_DESCRIPTION("TI CPSW Ethernet driver");
3044