xref: /openbmc/linux/drivers/net/ethernet/ti/cpsw_new.c (revision d9f6e12f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments Ethernet Switch Driver
4  *
5  * Copyright (C) 2019 Texas Instruments
6  */
7 
8 #include <linux/io.h>
9 #include <linux/clk.h>
10 #include <linux/timer.h>
11 #include <linux/module.h>
12 #include <linux/irqreturn.h>
13 #include <linux/interrupt.h>
14 #include <linux/if_ether.h>
15 #include <linux/etherdevice.h>
16 #include <linux/net_tstamp.h>
17 #include <linux/phy.h>
18 #include <linux/phy/phy.h>
19 #include <linux/delay.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/of.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/of_device.h>
27 #include <linux/if_vlan.h>
28 #include <linux/kmemleak.h>
29 #include <linux/sys_soc.h>
30 
31 #include <net/page_pool.h>
32 #include <net/pkt_cls.h>
33 #include <net/devlink.h>
34 
35 #include "cpsw.h"
36 #include "cpsw_ale.h"
37 #include "cpsw_priv.h"
38 #include "cpsw_sl.h"
39 #include "cpsw_switchdev.h"
40 #include "cpts.h"
41 #include "davinci_cpdma.h"
42 
43 #include <net/pkt_sched.h>
44 
45 static int debug_level;
46 static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
47 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
48 static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
49 
50 struct cpsw_devlink {
51 	struct cpsw_common *cpsw;
52 };
53 
54 enum cpsw_devlink_param_id {
55 	CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
56 	CPSW_DL_PARAM_SWITCH_MODE,
57 	CPSW_DL_PARAM_ALE_BYPASS,
58 };
59 
60 /* struct cpsw_common is not needed, kept here for compatibility
61  * reasons with the old driver
62  */
63 static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
64 				 struct cpsw_priv *priv)
65 {
66 	if (priv->emac_port == HOST_PORT_NUM)
67 		return -1;
68 
69 	return priv->emac_port - 1;
70 }
71 
72 static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
73 {
74 	return !cpsw->data.dual_emac;
75 }
76 
77 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
78 {
79 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
80 	bool enable_uni = false;
81 	int i;
82 
83 	if (cpsw_is_switch_en(cpsw))
84 		return;
85 
86 	/* Enabling promiscuous mode on one interface will be
87 	 * common to both interfaces, as they share
88 	 * the same hardware resource.
89 	 */
90 	for (i = 0; i < cpsw->data.slaves; i++)
91 		if (cpsw->slaves[i].ndev &&
92 		    (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
93 			enable_uni = true;
94 
95 	if (!enable && enable_uni) {
96 		enable = enable_uni;
97 		dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
98 	}
99 
100 	if (enable) {
101 		/* Enable unknown unicast, reg/unreg mcast */
102 		cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
103 				     ALE_P0_UNI_FLOOD, 1);
104 
105 		dev_dbg(cpsw->dev, "promiscuity enabled\n");
106 	} else {
107 		/* Disable unknown unicast */
108 		cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
109 				     ALE_P0_UNI_FLOOD, 0);
110 		dev_dbg(cpsw->dev, "promiscuity disabled\n");
111 	}
112 }
113 
114 /**
115  * cpsw_set_mc - add a multicast address to the ALE table, or delete
116  * it, depending on @add
117  * @ndev: device to sync
118  * @addr: address to be added or deleted
119  * @vid: vlan id, if vid < 0 set/unset address for real device
120  * @add: add address if the flag is set or remove otherwise
121  */
122 static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
123 		       int vid, int add)
124 {
125 	struct cpsw_priv *priv = netdev_priv(ndev);
126 	struct cpsw_common *cpsw = priv->cpsw;
127 	int mask, flags, ret, slave_no;
128 
129 	slave_no = cpsw_slave_index(cpsw, priv);
130 	if (vid < 0)
131 		vid = cpsw->slaves[slave_no].port_vlan;
132 
133 	mask =  ALE_PORT_HOST;
134 	flags = vid ? ALE_VLAN : 0;
135 
136 	if (add)
137 		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
138 	else
139 		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
140 
141 	return ret;
142 }
143 
144 static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
145 {
146 	struct addr_sync_ctx *sync_ctx = ctx;
147 	struct netdev_hw_addr *ha;
148 	int found = 0, ret = 0;
149 
150 	if (!vdev || !(vdev->flags & IFF_UP))
151 		return 0;
152 
153 	/* vlan address is relevant if its sync_cnt != 0 */
154 	netdev_for_each_mc_addr(ha, vdev) {
155 		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
156 			found = ha->sync_cnt;
157 			break;
158 		}
159 	}
160 
161 	if (found)
162 		sync_ctx->consumed++;
163 
164 	if (sync_ctx->flush) {
165 		if (!found)
166 			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
167 		return 0;
168 	}
169 
170 	if (found)
171 		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
172 
173 	return ret;
174 }
175 
176 static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
177 {
178 	struct addr_sync_ctx sync_ctx;
179 	int ret;
180 
181 	sync_ctx.consumed = 0;
182 	sync_ctx.addr = addr;
183 	sync_ctx.ndev = ndev;
184 	sync_ctx.flush = 0;
185 
186 	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
187 	if (sync_ctx.consumed < num && !ret)
188 		ret = cpsw_set_mc(ndev, addr, -1, 1);
189 
190 	return ret;
191 }
192 
193 static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
194 {
195 	struct addr_sync_ctx sync_ctx;
196 
197 	sync_ctx.consumed = 0;
198 	sync_ctx.addr = addr;
199 	sync_ctx.ndev = ndev;
200 	sync_ctx.flush = 1;
201 
202 	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
203 	if (sync_ctx.consumed == num)
204 		cpsw_set_mc(ndev, addr, -1, 0);
205 
206 	return 0;
207 }
208 
209 static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
210 {
211 	struct addr_sync_ctx *sync_ctx = ctx;
212 	struct netdev_hw_addr *ha;
213 	int found = 0;
214 
215 	if (!vdev || !(vdev->flags & IFF_UP))
216 		return 0;
217 
218 	/* vlan address is relevant if its sync_cnt != 0 */
219 	netdev_for_each_mc_addr(ha, vdev) {
220 		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
221 			found = ha->sync_cnt;
222 			break;
223 		}
224 	}
225 
226 	if (!found)
227 		return 0;
228 
229 	sync_ctx->consumed++;
230 	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
231 	return 0;
232 }
233 
234 static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
235 {
236 	struct addr_sync_ctx sync_ctx;
237 
238 	sync_ctx.addr = addr;
239 	sync_ctx.ndev = ndev;
240 	sync_ctx.consumed = 0;
241 
242 	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
243 	if (sync_ctx.consumed < num)
244 		cpsw_set_mc(ndev, addr, -1, 0);
245 
246 	return 0;
247 }
248 
249 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
250 {
251 	struct cpsw_priv *priv = netdev_priv(ndev);
252 	struct cpsw_common *cpsw = priv->cpsw;
253 
254 	if (ndev->flags & IFF_PROMISC) {
255 		/* Enable promiscuous mode */
256 		cpsw_set_promiscious(ndev, true);
257 		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
258 		return;
259 	}
260 
261 	/* Disable promiscuous mode */
262 	cpsw_set_promiscious(ndev, false);
263 
264 	/* Restore allmulti on vlans if necessary */
265 	cpsw_ale_set_allmulti(cpsw->ale,
266 			      ndev->flags & IFF_ALLMULTI, priv->emac_port);
267 
268 	/* add/remove mcast address either for real netdev or for vlan */
269 	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
270 			       cpsw_del_mc_addr);
271 }
272 
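/* Total page buffer length needed by build_skb() on the RX path: the DMA
 * data area of len bytes is preceded by CPSW_HEADROOM and must be followed
 * by enough tailroom for the struct skb_shared_info that build_skb()
 * places at the end of the buffer.
 */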
273 static unsigned int cpsw_rxbuf_total_len(unsigned int len)
274 {
275 	len += CPSW_HEADROOM;
276 	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
277 
278 	return SKB_DATA_ALIGN(len);
279 }
280 
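/* CPDMA RX completion handler, invoked once per received packet. Roughly:
 * allocate a replacement page from the channel's page_pool, run the
 * attached XDP program (if any) on the received page, otherwise wrap it in
 * an skb via build_skb() and pass it to the network stack, and finally
 * requeue the replacement page to cpdma so the number of RX descriptors
 * stays constant.
 */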
281 static void cpsw_rx_handler(void *token, int len, int status)
282 {
283 	struct page *new_page, *page = token;
284 	void *pa = page_address(page);
285 	int headroom = CPSW_HEADROOM;
286 	struct cpsw_meta_xdp *xmeta;
287 	struct cpsw_common *cpsw;
288 	struct net_device *ndev;
289 	int port, ch, pkt_size;
290 	struct cpsw_priv *priv;
291 	struct page_pool *pool;
292 	struct sk_buff *skb;
293 	struct xdp_buff xdp;
294 	int ret = 0;
295 	dma_addr_t dma;
296 
297 	xmeta = pa + CPSW_XMETA_OFFSET;
298 	cpsw = ndev_to_cpsw(xmeta->ndev);
299 	ndev = xmeta->ndev;
300 	pkt_size = cpsw->rx_packet_max;
301 	ch = xmeta->ch;
302 
303 	if (status >= 0) {
304 		port = CPDMA_RX_SOURCE_PORT(status);
305 		if (port)
306 			ndev = cpsw->slaves[--port].ndev;
307 	}
308 
309 	priv = netdev_priv(ndev);
310 	pool = cpsw->page_pool[ch];
311 
312 	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
313 		/* In dual emac mode check for all interfaces */
314 		if (cpsw->usage_count && status >= 0) {
315 			/* The packet was received on an interface that is
316 			 * already down while the other interface is up and
317 			 * running. Instead of freeing the page, which would
318 			 * reduce the number of rx descriptors in the DMA
319 			 * engine, requeue it back to cpdma.
320 			 */
321 			new_page = page;
322 			goto requeue;
323 		}
324 
325 		/* the interface is going down, pages are purged */
326 		page_pool_recycle_direct(pool, page);
327 		return;
328 	}
329 
330 	new_page = page_pool_dev_alloc_pages(pool);
331 	if (unlikely(!new_page)) {
332 		new_page = page;
333 		ndev->stats.rx_dropped++;
334 		goto requeue;
335 	}
336 
337 	if (priv->xdp_prog) {
338 		int headroom = CPSW_HEADROOM, size = len;
339 
340 		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
341 		if (status & CPDMA_RX_VLAN_ENCAP) {
342 			headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
343 			size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
344 		}
345 
346 		xdp_prepare_buff(&xdp, pa, headroom, size, false);
347 
348 		ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
349 		if (ret != CPSW_XDP_PASS)
350 			goto requeue;
351 
352 		headroom = xdp.data - xdp.data_hard_start;
353 
354 		/* XDP prog can modify vlan tag, so can't use encap header */
355 		status &= ~CPDMA_RX_VLAN_ENCAP;
356 	}
357 
358 	/* pass skb to netstack if no XDP prog or returned XDP_PASS */
359 	skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
360 	if (!skb) {
361 		ndev->stats.rx_dropped++;
362 		page_pool_recycle_direct(pool, page);
363 		goto requeue;
364 	}
365 
366 	skb->offload_fwd_mark = priv->offload_fwd_mark;
367 	skb_reserve(skb, headroom);
368 	skb_put(skb, len);
369 	skb->dev = ndev;
370 	if (status & CPDMA_RX_VLAN_ENCAP)
371 		cpsw_rx_vlan_encap(skb);
372 	if (priv->rx_ts_enabled)
373 		cpts_rx_timestamp(cpsw->cpts, skb);
374 	skb->protocol = eth_type_trans(skb, ndev);
375 
376 	/* unmap the page as the netstack does not recycle skb pages */
377 	page_pool_release_page(pool, page);
378 	netif_receive_skb(skb);
379 
380 	ndev->stats.rx_bytes += len;
381 	ndev->stats.rx_packets++;
382 
383 requeue:
384 	xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
385 	xmeta->ndev = ndev;
386 	xmeta->ch = ch;
387 
388 	dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
389 	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
390 				       pkt_size, 0);
391 	if (ret < 0) {
392 		WARN_ON(ret == -ENOMEM);
393 		page_pool_recycle_direct(pool, new_page);
394 	}
395 }
396 
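/* Create the ALE entries backing one VLAN on a dual_emac port: the VLAN
 * itself (members: this port + host), a unicast entry for the port MAC so
 * host-bound traffic terminates on port 0, and a multicast entry for
 * broadcast. On failure, already-added entries are unwound in reverse
 * order.
 */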
397 static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
398 				   unsigned short vid)
399 {
400 	struct cpsw_common *cpsw = priv->cpsw;
401 	int unreg_mcast_mask = 0;
402 	int mcast_mask;
403 	u32 port_mask;
404 	int ret;
405 
406 	port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
407 
408 	mcast_mask = ALE_PORT_HOST;
409 	if (priv->ndev->flags & IFF_ALLMULTI)
410 		unreg_mcast_mask = mcast_mask;
411 
412 	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
413 				unreg_mcast_mask);
414 	if (ret != 0)
415 		return ret;
416 
417 	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
418 				 HOST_PORT_NUM, ALE_VLAN, vid);
419 	if (ret != 0)
420 		goto clean_vid;
421 
422 	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
423 				 mcast_mask, ALE_VLAN, vid, 0);
424 	if (ret != 0)
425 		goto clean_vlan_ucast;
426 	return 0;
427 
428 clean_vlan_ucast:
429 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
430 			   HOST_PORT_NUM, ALE_VLAN, vid);
431 clean_vid:
432 	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
433 	return ret;
434 }
435 
436 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
437 				    __be16 proto, u16 vid)
438 {
439 	struct cpsw_priv *priv = netdev_priv(ndev);
440 	struct cpsw_common *cpsw = priv->cpsw;
441 	int ret, i;
442 
443 	if (cpsw_is_switch_en(cpsw)) {
444 		dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
445 		return 0;
446 	}
447 
448 	if (vid == cpsw->data.default_vlan)
449 		return 0;
450 
451 	ret = pm_runtime_get_sync(cpsw->dev);
452 	if (ret < 0) {
453 		pm_runtime_put_noidle(cpsw->dev);
454 		return ret;
455 	}
456 
457 	/* In dual EMAC, reserved VLAN id should not be used for
458 	 * creating VLAN interfaces as this can break the dual
459 	 * EMAC port separation
460 	 */
461 	for (i = 0; i < cpsw->data.slaves; i++) {
462 		if (cpsw->slaves[i].ndev &&
463 		    vid == cpsw->slaves[i].port_vlan) {
464 			ret = -EINVAL;
465 			goto err;
466 		}
467 	}
468 
469 	dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
470 	ret = cpsw_add_vlan_ale_entry(priv, vid);
471 err:
472 	pm_runtime_put(cpsw->dev);
473 	return ret;
474 }
475 
476 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
477 {
478 	struct cpsw_priv *priv = arg;
479 
480 	if (!vdev || !vid)
481 		return 0;
482 
483 	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
484 	return 0;
485 }
486 
487 /* restore resources after port reset */
488 static void cpsw_restore(struct cpsw_priv *priv)
489 {
490 	struct cpsw_common *cpsw = priv->cpsw;
491 
492 	/* restore vlan configurations */
493 	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
494 
495 	/* restore MQPRIO offload */
496 	cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
497 
498 	/* restore CBS offload */
499 	cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
500 }
501 
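/* 01:80:c2:00:00:00 is the IEEE 802.1D bridge group address carried by STP
 * BPDUs. Registering it as a supervisory (ALE_SUPER) multicast entry with
 * BLOCK_LEARN_FWD keeps BPDUs flowing to the host port regardless of the
 * STP state of the ingress port.
 */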
502 static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
503 {
504 	char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
505 
506 	cpsw_ale_add_mcast(cpsw->ale, stpa,
507 			   ALE_PORT_HOST, ALE_SUPER, 0,
508 			   ALE_MCAST_BLOCK_LEARN_FWD);
509 }
510 
511 static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
512 {
513 	int vlan = cpsw->data.default_vlan;
514 
515 	writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
516 
517 	writel(vlan, &cpsw->host_port_regs->port_vlan);
518 
519 	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
520 			  ALE_ALL_PORTS, ALE_ALL_PORTS,
521 			  ALE_PORT_1 | ALE_PORT_2);
522 
523 	cpsw_init_stp_ale_entry(cpsw);
524 
525 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
526 	dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
527 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
528 }
529 
530 static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
531 {
532 	int vlan = cpsw->data.default_vlan;
533 
534 	writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
535 
536 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
537 	dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
538 
539 	writel(vlan, &cpsw->host_port_regs->port_vlan);
540 
541 	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
542 	/* learning makes no sense in dual_mac mode */
543 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
544 }
545 
546 static void cpsw_init_host_port(struct cpsw_priv *priv)
547 {
548 	struct cpsw_common *cpsw = priv->cpsw;
549 	u32 control_reg;
550 
551 	/* soft reset the controller and initialize ale */
552 	soft_reset("cpsw", &cpsw->regs->soft_reset);
553 	cpsw_ale_start(cpsw->ale);
554 
555 	/* switch to vlan aware mode */
556 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
557 			     CPSW_ALE_VLAN_AWARE);
558 	control_reg = readl(&cpsw->regs->control);
559 	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
560 	writel(control_reg, &cpsw->regs->control);
561 
562 	/* setup host port priority mapping */
563 	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
564 		       &cpsw->host_port_regs->cpdma_tx_pri_map);
565 	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
566 
567 	/* disable priority elevation */
568 	writel_relaxed(0, &cpsw->regs->ptype);
569 
570 	/* enable statistics collection on all ports */
571 	writel_relaxed(0x7, &cpsw->regs->stat_port_en);
572 
573 	/* Enable internal fifo flow control */
574 	writel(0x7, &cpsw->regs->flow_control);
575 
576 	if (cpsw_is_switch_en(cpsw))
577 		cpsw_init_host_port_switch(cpsw);
578 	else
579 		cpsw_init_host_port_dual_mac(cpsw);
580 
581 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
582 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
583 }
584 
585 static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
586 						    struct cpsw_slave *slave)
587 {
588 	u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
589 	struct cpsw_common *cpsw = priv->cpsw;
590 	u32 reg;
591 
592 	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
593 	       CPSW2_PORT_VLAN;
594 	slave_write(slave, slave->port_vlan, reg);
595 
596 	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
597 			  port_mask, port_mask, 0);
598 	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
599 			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
600 			   ALE_MCAST_FWD);
601 	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
602 			   HOST_PORT_NUM, ALE_VLAN |
603 			   ALE_SECURE, slave->port_vlan);
604 	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
605 			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
606 	/* learning makes no sense in dual_mac mode */
607 	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
608 			     ALE_PORT_NOLEARN, 1);
609 }
610 
611 static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
612 						 struct cpsw_slave *slave)
613 {
614 	u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
615 	struct cpsw_common *cpsw = priv->cpsw;
616 	u32 reg;
617 
618 	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
619 			     ALE_PORT_DROP_UNKNOWN_VLAN, 0);
620 	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
621 			     ALE_PORT_NOLEARN, 0);
622 	/* disabling SA_UPDATE is required to make stp work; without this
623 	 * setting host MAC addresses will jump between ports.
624 	 * As per the TRM, a MAC address can be defined as unicast supervisory
625 	 * (super) by setting both (ALE_BLOCKED | ALE_SECURE), which should
626 	 * prevent SA_UPDATE, but the HW seems to work incorrectly and setting
627 	 * ALE_SECURE causes STP packets to be dropped due to the ingress filter
628 	 *	if (source address found) and (secure) and
629 	 *	   (receive port number != port_number)
630 	 *	   then discard the packet
631 	 */
632 	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
633 			     ALE_PORT_NO_SA_UPDATE, 1);
634 
635 	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
636 			   port_mask, ALE_VLAN, slave->port_vlan,
637 			   ALE_MCAST_FWD_2);
638 	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
639 			   HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
640 
641 	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
642 	       CPSW2_PORT_VLAN;
643 	slave_write(slave, slave->port_vlan, reg);
644 }
645 
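/* PHY link change callback: rebuild the MAC control word from the current
 * PHY state and apply it only if it changed. For example, a 1000/full link
 * with both pause directions enabled resolves to CPSW_SL_CTL_GMII_EN |
 * CPSW_SL_CTL_GIG | CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN |
 * CPSW_SL_CTL_TX_FLOW_EN.
 */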
646 static void cpsw_adjust_link(struct net_device *ndev)
647 {
648 	struct cpsw_priv *priv = netdev_priv(ndev);
649 	struct cpsw_common *cpsw = priv->cpsw;
650 	struct cpsw_slave *slave;
651 	struct phy_device *phy;
652 	u32 mac_control = 0;
653 
654 	slave = &cpsw->slaves[priv->emac_port - 1];
655 	phy = slave->phy;
656 
657 	if (!phy)
658 		return;
659 
660 	if (phy->link) {
661 		mac_control = CPSW_SL_CTL_GMII_EN;
662 
663 		if (phy->speed == 1000)
664 			mac_control |= CPSW_SL_CTL_GIG;
665 		if (phy->duplex)
666 			mac_control |= CPSW_SL_CTL_FULLDUPLEX;
667 
668 		/* set speed_in input in case RMII mode is used at 100Mbps */
669 		if (phy->speed == 100)
670 			mac_control |= CPSW_SL_CTL_IFCTL_A;
671 		/* in band mode only works in 10Mbps RGMII mode */
672 		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
673 			mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
674 
675 		if (priv->rx_pause)
676 			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
677 
678 		if (priv->tx_pause)
679 			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
680 
681 		if (mac_control != slave->mac_control)
682 			cpsw_sl_ctl_set(slave->mac_sl, mac_control);
683 
684 		/* enable forwarding */
685 		cpsw_ale_control_set(cpsw->ale, priv->emac_port,
686 				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
687 
688 		netif_tx_wake_all_queues(ndev);
689 
690 		if (priv->shp_cfg_speed &&
691 		    priv->shp_cfg_speed != slave->phy->speed &&
692 		    !cpsw_shp_is_off(priv))
693 			dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
694 	} else {
695 		netif_tx_stop_all_queues(ndev);
696 
697 		mac_control = 0;
698 		/* disable forwarding */
699 		cpsw_ale_control_set(cpsw->ale, priv->emac_port,
700 				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
701 
702 		cpsw_sl_wait_for_idle(slave->mac_sl, 100);
703 
704 		cpsw_sl_ctl_reset(slave->mac_sl);
705 	}
706 
707 	if (mac_control != slave->mac_control)
708 		phy_print_status(phy);
709 
710 	slave->mac_control = mac_control;
711 
712 	if (phy->link && cpsw_need_resplit(cpsw))
713 		cpsw_split_res(cpsw);
714 }
715 
716 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
717 {
718 	struct cpsw_common *cpsw = priv->cpsw;
719 	struct phy_device *phy;
720 
721 	cpsw_sl_reset(slave->mac_sl, 100);
722 	cpsw_sl_ctl_reset(slave->mac_sl);
723 
724 	/* setup priority mapping */
725 	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
726 			  RX_PRIORITY_MAPPING);
727 
728 	switch (cpsw->version) {
729 	case CPSW_VERSION_1:
730 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
731 		/* Increase RX FIFO size to 5 to support full duplex
732 		 * flow control mode
733 		 */
734 		slave_write(slave,
735 			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
736 			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
737 		break;
738 	case CPSW_VERSION_2:
739 	case CPSW_VERSION_3:
740 	case CPSW_VERSION_4:
741 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
742 		/* Increase RX FIFO size to 5 to support full duplex
743 		 * flow control mode
744 		 */
745 		slave_write(slave,
746 			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
747 			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
748 		break;
749 	}
750 
751 	/* setup max packet size and mac address */
752 	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
753 			  cpsw->rx_packet_max);
754 	cpsw_set_slave_mac(slave, priv);
755 
756 	slave->mac_control = 0;	/* no link yet */
757 
758 	if (cpsw_is_switch_en(cpsw))
759 		cpsw_port_add_switch_def_ale_entries(priv, slave);
760 	else
761 		cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
762 
763 	if (!slave->data->phy_node)
764 		dev_err(priv->dev, "no phy found on slave %d\n",
765 			slave->slave_num);
766 	phy = of_phy_connect(priv->ndev, slave->data->phy_node,
767 			     &cpsw_adjust_link, 0, slave->data->phy_if);
768 	if (!phy) {
769 		dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
770 			slave->data->phy_node,
771 			slave->slave_num);
772 		return;
773 	}
774 	slave->phy = phy;
775 
776 	phy_attached_info(slave->phy);
777 
778 	phy_start(slave->phy);
779 
780 	/* Configure GMII_SEL register */
781 	phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
782 			 slave->data->phy_if);
783 }
784 
785 static int cpsw_ndo_stop(struct net_device *ndev)
786 {
787 	struct cpsw_priv *priv = netdev_priv(ndev);
788 	struct cpsw_common *cpsw = priv->cpsw;
789 	struct cpsw_slave *slave;
790 
791 	cpsw_info(priv, ifdown, "shutting down ndev\n");
792 	slave = &cpsw->slaves[priv->emac_port - 1];
793 	if (slave->phy)
794 		phy_stop(slave->phy);
795 
796 	netif_tx_stop_all_queues(priv->ndev);
797 
798 	if (slave->phy) {
799 		phy_disconnect(slave->phy);
800 		slave->phy = NULL;
801 	}
802 
803 	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
804 
805 	if (cpsw->usage_count <= 1) {
806 		napi_disable(&cpsw->napi_rx);
807 		napi_disable(&cpsw->napi_tx);
808 		cpts_unregister(cpsw->cpts);
809 		cpsw_intr_disable(cpsw);
810 		cpdma_ctlr_stop(cpsw->dma);
811 		cpsw_ale_stop(cpsw->ale);
812 		cpsw_destroy_xdp_rxqs(cpsw);
813 	}
814 
815 	if (cpsw_need_resplit(cpsw))
816 		cpsw_split_res(cpsw);
817 
818 	cpsw->usage_count--;
819 	pm_runtime_put_sync(cpsw->dev);
820 	return 0;
821 }
822 
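/* ndo_open: the first opener (usage_count == 0) also brings up the shared
 * resources - host port, XDP rxqs, RX descriptor fill, CPTS and NAPI.
 * Subsequent openers only bring up their own slave port.
 */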
823 static int cpsw_ndo_open(struct net_device *ndev)
824 {
825 	struct cpsw_priv *priv = netdev_priv(ndev);
826 	struct cpsw_common *cpsw = priv->cpsw;
827 	int ret;
828 
829 	dev_info(priv->dev, "starting ndev. mode: %s\n",
830 		 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
831 	ret = pm_runtime_get_sync(cpsw->dev);
832 	if (ret < 0) {
833 		pm_runtime_put_noidle(cpsw->dev);
834 		return ret;
835 	}
836 
837 	/* Notify the stack of the actual queue counts. */
838 	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
839 	if (ret) {
840 		dev_err(priv->dev, "cannot set real number of tx queues\n");
841 		goto pm_cleanup;
842 	}
843 
844 	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
845 	if (ret) {
846 		dev_err(priv->dev, "cannot set real number of rx queues\n");
847 		goto pm_cleanup;
848 	}
849 
850 	/* Initialize host and slave ports */
851 	if (!cpsw->usage_count)
852 		cpsw_init_host_port(priv);
853 	cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
854 
855 	/* initialize shared resources for every ndev */
856 	if (!cpsw->usage_count) {
857 		/* create rxqs for both interfaces in dual_mac mode as they
858 		 * use the same pool and must be destroyed together when unused.
859 		 */
860 		ret = cpsw_create_xdp_rxqs(cpsw);
861 		if (ret < 0)
862 			goto err_cleanup;
863 
864 		ret = cpsw_fill_rx_channels(priv);
865 		if (ret < 0)
866 			goto err_cleanup;
867 
868 		if (cpsw->cpts) {
869 			if (cpts_register(cpsw->cpts))
870 				dev_err(priv->dev, "error registering cpts device\n");
871 			else
872 				writel(0x10, &cpsw->wr_regs->misc_en);
873 		}
874 
875 		napi_enable(&cpsw->napi_rx);
876 		napi_enable(&cpsw->napi_tx);
877 
878 		if (cpsw->tx_irq_disabled) {
879 			cpsw->tx_irq_disabled = false;
880 			enable_irq(cpsw->irqs_table[1]);
881 		}
882 
883 		if (cpsw->rx_irq_disabled) {
884 			cpsw->rx_irq_disabled = false;
885 			enable_irq(cpsw->irqs_table[0]);
886 		}
887 	}
888 
889 	cpsw_restore(priv);
890 
891 	/* Enable Interrupt pacing if configured */
892 	if (cpsw->coal_intvl != 0) {
893 		struct ethtool_coalesce coal;
894 
895 		coal.rx_coalesce_usecs = cpsw->coal_intvl;
896 		cpsw_set_coalesce(ndev, &coal);
897 	}
898 
899 	cpdma_ctlr_start(cpsw->dma);
900 	cpsw_intr_enable(cpsw);
901 	cpsw->usage_count++;
902 
903 	return 0;
904 
905 err_cleanup:
906 	cpsw_ndo_stop(ndev);
907 
908 pm_cleanup:
909 	pm_runtime_put_sync(cpsw->dev);
910 	return ret;
911 }
912 
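/* ndo_start_xmit: note the stop/wake dance around the TX descriptor pool.
 * The queue is stopped when no free descriptor is left, and free
 * descriptors are re-checked after the barrier, so a concurrent TX
 * completion cannot leave the queue stopped forever.
 */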
913 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
914 				       struct net_device *ndev)
915 {
916 	struct cpsw_priv *priv = netdev_priv(ndev);
917 	struct cpsw_common *cpsw = priv->cpsw;
918 	struct cpts *cpts = cpsw->cpts;
919 	struct netdev_queue *txq;
920 	struct cpdma_chan *txch;
921 	int ret, q_idx;
922 
923 	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
924 		cpsw_err(priv, tx_err, "packet pad failed\n");
925 		ndev->stats.tx_dropped++;
926 		return NET_XMIT_DROP;
927 	}
928 
929 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
930 	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
931 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
932 
933 	q_idx = skb_get_queue_mapping(skb);
934 	if (q_idx >= cpsw->tx_ch_num)
935 		q_idx = q_idx % cpsw->tx_ch_num;
936 
937 	txch = cpsw->txv[q_idx].ch;
938 	txq = netdev_get_tx_queue(ndev, q_idx);
939 	skb_tx_timestamp(skb);
940 	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
941 				priv->emac_port);
942 	if (unlikely(ret != 0)) {
943 		cpsw_err(priv, tx_err, "desc submit failed\n");
944 		goto fail;
945 	}
946 
947 	/* If there are no more free tx descriptors left then we need
948 	 * to tell the kernel to stop sending us tx frames.
949 	 */
950 	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
951 		netif_tx_stop_queue(txq);
952 
953 		/* Barrier, so that stop_queue is visible to other cpus */
954 		smp_mb__after_atomic();
955 
956 		if (cpdma_check_free_tx_desc(txch))
957 			netif_tx_wake_queue(txq);
958 	}
959 
960 	return NETDEV_TX_OK;
961 fail:
962 	ndev->stats.tx_dropped++;
963 	netif_tx_stop_queue(txq);
964 
965 	/* Barrier, so that stop_queue is visible to other cpus */
966 	smp_mb__after_atomic();
967 
968 	if (cpdma_check_free_tx_desc(txch))
969 		netif_tx_wake_queue(txq);
970 
971 	return NETDEV_TX_BUSY;
972 }
973 
974 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
975 {
976 	struct sockaddr *addr = (struct sockaddr *)p;
977 	struct cpsw_priv *priv = netdev_priv(ndev);
978 	struct cpsw_common *cpsw = priv->cpsw;
979 	int ret, slave_no;
980 	int flags = 0;
981 	u16 vid = 0;
982 
983 	slave_no = cpsw_slave_index(cpsw, priv);
984 	if (!is_valid_ether_addr(addr->sa_data))
985 		return -EADDRNOTAVAIL;
986 
987 	ret = pm_runtime_get_sync(cpsw->dev);
988 	if (ret < 0) {
989 		pm_runtime_put_noidle(cpsw->dev);
990 		return ret;
991 	}
992 
993 	vid = cpsw->slaves[slave_no].port_vlan;
994 	flags = ALE_VLAN | ALE_SECURE;
995 
996 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
997 			   flags, vid);
998 	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
999 			   flags, vid);
1000 
1001 	ether_addr_copy(priv->mac_addr, addr->sa_data);
1002 	ether_addr_copy(ndev->dev_addr, priv->mac_addr);
1003 	cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
1004 
1005 	pm_runtime_put(cpsw->dev);
1006 
1007 	return 0;
1008 }
1009 
1010 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1011 				     __be16 proto, u16 vid)
1012 {
1013 	struct cpsw_priv *priv = netdev_priv(ndev);
1014 	struct cpsw_common *cpsw = priv->cpsw;
1015 	int ret;
1016 	int i;
1017 
1018 	if (cpsw_is_switch_en(cpsw)) {
1019 		dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
1020 		return 0;
1021 	}
1022 
1023 	if (vid == cpsw->data.default_vlan)
1024 		return 0;
1025 
1026 	ret = pm_runtime_get_sync(cpsw->dev);
1027 	if (ret < 0) {
1028 		pm_runtime_put_noidle(cpsw->dev);
1029 		return ret;
1030 	}
1031 
1032 	/* reset the return code as pm_runtime_get_sync() can return
1033 	 * non-zero values as well.
1034 	 */
1035 	ret = 0;
1036 	for (i = 0; i < cpsw->data.slaves; i++) {
1037 		if (cpsw->slaves[i].ndev &&
1038 		    vid == cpsw->slaves[i].port_vlan) {
1039 			ret = -EINVAL;
1040 			goto err;
1041 		}
1042 	}
1043 
1044 	dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1045 	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1046 	if (ret)
1047 		dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
1048 	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1049 				 HOST_PORT_NUM, ALE_VLAN, vid);
1050 	if (ret)
1051 		dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
1052 			ret);
1053 	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1054 				 0, ALE_VLAN, vid);
1055 	if (ret)
1056 		dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
1057 			ret);
1058 	cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
1059 	ret = 0;
1060 err:
1061 	pm_runtime_put(cpsw->dev);
1062 	return ret;
1063 }
1064 
1065 static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
1066 				       size_t len)
1067 {
1068 	struct cpsw_priv *priv = netdev_priv(ndev);
1069 	int err;
1070 
1071 	err = snprintf(name, len, "p%d", priv->emac_port);
1072 
1073 	if (err >= len)
1074 		return -EINVAL;
1075 
1076 	return 0;
1077 }
1078 
1079 #ifdef CONFIG_NET_POLL_CONTROLLER
1080 static void cpsw_ndo_poll_controller(struct net_device *ndev)
1081 {
1082 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1083 
1084 	cpsw_intr_disable(cpsw);
1085 	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
1086 	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
1087 	cpsw_intr_enable(cpsw);
1088 }
1089 #endif
1090 
1091 static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
1092 			     struct xdp_frame **frames, u32 flags)
1093 {
1094 	struct cpsw_priv *priv = netdev_priv(ndev);
1095 	struct xdp_frame *xdpf;
1096 	int i, drops = 0;
1097 
1098 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1099 		return -EINVAL;
1100 
1101 	for (i = 0; i < n; i++) {
1102 		xdpf = frames[i];
1103 		if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
1104 			xdp_return_frame_rx_napi(xdpf);
1105 			drops++;
1106 			continue;
1107 		}
1108 
1109 		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
1110 			drops++;
1111 	}
1112 
1113 	return n - drops;
1114 }
1115 
1116 static int cpsw_get_port_parent_id(struct net_device *ndev,
1117 				   struct netdev_phys_item_id *ppid)
1118 {
1119 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1120 
1121 	ppid->id_len = sizeof(cpsw->base_mac);
1122 	memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
1123 
1124 	return 0;
1125 }
1126 
1127 static const struct net_device_ops cpsw_netdev_ops = {
1128 	.ndo_open		= cpsw_ndo_open,
1129 	.ndo_stop		= cpsw_ndo_stop,
1130 	.ndo_start_xmit		= cpsw_ndo_start_xmit,
1131 	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
1132 	.ndo_do_ioctl		= cpsw_ndo_ioctl,
1133 	.ndo_validate_addr	= eth_validate_addr,
1134 	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
1135 	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
1136 	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
1137 #ifdef CONFIG_NET_POLL_CONTROLLER
1138 	.ndo_poll_controller	= cpsw_ndo_poll_controller,
1139 #endif
1140 	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
1141 	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
1142 	.ndo_setup_tc           = cpsw_ndo_setup_tc,
1143 	.ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
1144 	.ndo_bpf		= cpsw_ndo_bpf,
1145 	.ndo_xdp_xmit		= cpsw_ndo_xdp_xmit,
1146 	.ndo_get_port_parent_id	= cpsw_get_port_parent_id,
1147 };
1148 
1149 static void cpsw_get_drvinfo(struct net_device *ndev,
1150 			     struct ethtool_drvinfo *info)
1151 {
1152 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1153 	struct platform_device *pdev;
1154 
1155 	pdev = to_platform_device(cpsw->dev);
1156 	strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
1157 	strlcpy(info->version, "2.0", sizeof(info->version));
1158 	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
1159 }
1160 
1161 static int cpsw_set_pauseparam(struct net_device *ndev,
1162 			       struct ethtool_pauseparam *pause)
1163 {
1164 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1165 	struct cpsw_priv *priv = netdev_priv(ndev);
1166 	int slave_no;
1167 
1168 	slave_no = cpsw_slave_index(cpsw, priv);
1169 	if (!cpsw->slaves[slave_no].phy)
1170 		return -EINVAL;
1171 
1172 	if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
1173 		return -EINVAL;
1174 
1175 	priv->rx_pause = pause->rx_pause ? true : false;
1176 	priv->tx_pause = pause->tx_pause ? true : false;
1177 
1178 	phy_set_asym_pause(cpsw->slaves[slave_no].phy,
1179 			   priv->rx_pause, priv->tx_pause);
1180 
1181 	return 0;
1182 }
1183 
1184 static int cpsw_set_channels(struct net_device *ndev,
1185 			     struct ethtool_channels *chs)
1186 {
1187 	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
1188 }
1189 
1190 static const struct ethtool_ops cpsw_ethtool_ops = {
1191 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
1192 	.get_drvinfo		= cpsw_get_drvinfo,
1193 	.get_msglevel		= cpsw_get_msglevel,
1194 	.set_msglevel		= cpsw_set_msglevel,
1195 	.get_link		= ethtool_op_get_link,
1196 	.get_ts_info		= cpsw_get_ts_info,
1197 	.get_coalesce		= cpsw_get_coalesce,
1198 	.set_coalesce		= cpsw_set_coalesce,
1199 	.get_sset_count		= cpsw_get_sset_count,
1200 	.get_strings		= cpsw_get_strings,
1201 	.get_ethtool_stats	= cpsw_get_ethtool_stats,
1202 	.get_pauseparam		= cpsw_get_pauseparam,
1203 	.set_pauseparam		= cpsw_set_pauseparam,
1204 	.get_wol		= cpsw_get_wol,
1205 	.set_wol		= cpsw_set_wol,
1206 	.get_regs_len		= cpsw_get_regs_len,
1207 	.get_regs		= cpsw_get_regs,
1208 	.begin			= cpsw_ethtool_op_begin,
1209 	.complete		= cpsw_ethtool_op_complete,
1210 	.get_channels		= cpsw_get_channels,
1211 	.set_channels		= cpsw_set_channels,
1212 	.get_link_ksettings	= cpsw_get_link_ksettings,
1213 	.set_link_ksettings	= cpsw_set_link_ksettings,
1214 	.get_eee		= cpsw_get_eee,
1215 	.set_eee		= cpsw_set_eee,
1216 	.nway_reset		= cpsw_nway_reset,
1217 	.get_ringparam		= cpsw_get_ringparam,
1218 	.set_ringparam		= cpsw_set_ringparam,
1219 };
1220 
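/* Parse the switch configuration from the device tree. The driver expects
 * exactly CPSW_SLAVE_PORTS_NUM children under an "ethernet-ports" node.
 * A minimal sketch of the expected layout (node names, unit addresses and
 * phandles are illustrative):
 *
 *	ethernet-ports {
 *		port@1 {
 *			reg = <1>;
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii";
 *			ti,dual-emac-pvid = <1>;
 *		};
 *		port@2 { ... };
 *	};
 */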
1221 static int cpsw_probe_dt(struct cpsw_common *cpsw)
1222 {
1223 	struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
1224 	struct cpsw_platform_data *data = &cpsw->data;
1225 	struct device *dev = cpsw->dev;
1226 	int ret;
1227 	u32 prop;
1228 
1229 	if (!node)
1230 		return -EINVAL;
1231 
1232 	tmp_node = of_get_child_by_name(node, "ethernet-ports");
1233 	if (!tmp_node)
1234 		return -ENOENT;
1235 	data->slaves = of_get_child_count(tmp_node);
1236 	if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
1237 		of_node_put(tmp_node);
1238 		return -ENOENT;
1239 	}
1240 
1241 	data->active_slave = 0;
1242 	data->channels = CPSW_MAX_QUEUES;
1243 	data->dual_emac = true;
1244 	data->bd_ram_size = CPSW_BD_RAM_SIZE;
1245 	data->mac_control = 0;
1246 
1247 	data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
1248 					sizeof(struct cpsw_slave_data),
1249 					GFP_KERNEL);
1250 	if (!data->slave_data)
1251 		return -ENOMEM;
1252 
1253 	/* Populate all the child nodes here...
1254 	 */
1255 	ret = devm_of_platform_populate(dev);
1256 	/* We do not want to force this, as in some cases there may be no children */
1257 	if (ret)
1258 		dev_warn(dev, "Doesn't have any child node\n");
1259 
1260 	for_each_child_of_node(tmp_node, port_np) {
1261 		struct cpsw_slave_data *slave_data;
1262 		const void *mac_addr;
1263 		u32 port_id;
1264 
1265 		ret = of_property_read_u32(port_np, "reg", &port_id);
1266 		if (ret < 0) {
1267 			dev_err(dev, "%pOF error reading port_id %d\n",
1268 				port_np, ret);
1269 			goto err_node_put;
1270 		}
1271 
1272 		if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
1273 			dev_err(dev, "%pOF has invalid port_id %u\n",
1274 				port_np, port_id);
1275 			ret = -EINVAL;
1276 			goto err_node_put;
1277 		}
1278 
1279 		slave_data = &data->slave_data[port_id - 1];
1280 
1281 		slave_data->disabled = !of_device_is_available(port_np);
1282 		if (slave_data->disabled)
1283 			continue;
1284 
1285 		slave_data->slave_node = port_np;
1286 		slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
1287 		if (IS_ERR(slave_data->ifphy)) {
1288 			ret = PTR_ERR(slave_data->ifphy);
1289 			dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
1290 				port_np, ret);
1291 			goto err_node_put;
1292 		}
1293 
1294 		if (of_phy_is_fixed_link(port_np)) {
1295 			ret = of_phy_register_fixed_link(port_np);
1296 			if (ret) {
1297 				if (ret != -EPROBE_DEFER)
1298 					dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
1299 						port_np, ret);
1300 				goto err_node_put;
1301 			}
1302 			slave_data->phy_node = of_node_get(port_np);
1303 		} else {
1304 			slave_data->phy_node =
1305 				of_parse_phandle(port_np, "phy-handle", 0);
1306 		}
1307 
1308 		if (!slave_data->phy_node) {
1309 			dev_err(dev, "%pOF no phy found\n", port_np);
1310 			ret = -ENODEV;
1311 			goto err_node_put;
1312 		}
1313 
1314 		ret = of_get_phy_mode(port_np, &slave_data->phy_if);
1315 		if (ret) {
1316 			dev_err(dev, "%pOF read phy-mode err %d\n",
1317 				port_np, ret);
1318 			goto err_node_put;
1319 		}
1320 
1321 		mac_addr = of_get_mac_address(port_np);
1322 		if (!IS_ERR(mac_addr)) {
1323 			ether_addr_copy(slave_data->mac_addr, mac_addr);
1324 		} else {
1325 			ret = ti_cm_get_macid(dev, port_id - 1,
1326 					      slave_data->mac_addr);
1327 			if (ret)
1328 				goto err_node_put;
1329 		}
1330 
1331 		if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
1332 					 &prop)) {
1333 			dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
1334 				port_np);
1335 			slave_data->dual_emac_res_vlan = port_id;
1336 			dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
1337 				port_np, slave_data->dual_emac_res_vlan);
1338 		} else {
1339 			slave_data->dual_emac_res_vlan = prop;
1340 		}
1341 	}
1342 
1343 	of_node_put(tmp_node);
1344 	return 0;
1345 
1346 err_node_put:
1347 	of_node_put(port_np);
1348 	return ret;
1349 }
1350 
1351 static void cpsw_remove_dt(struct cpsw_common *cpsw)
1352 {
1353 	struct cpsw_platform_data *data = &cpsw->data;
1354 	int i = 0;
1355 
1356 	for (i = 0; i < cpsw->data.slaves; i++) {
1357 		struct cpsw_slave_data *slave_data = &data->slave_data[i];
1358 		struct device_node *port_np = slave_data->phy_node;
1359 
1360 		if (port_np) {
1361 			if (of_phy_is_fixed_link(port_np))
1362 				of_phy_deregister_fixed_link(port_np);
1363 
1364 			of_node_put(port_np);
1365 		}
1366 	}
1367 }
1368 
1369 static int cpsw_create_ports(struct cpsw_common *cpsw)
1370 {
1371 	struct cpsw_platform_data *data = &cpsw->data;
1372 	struct net_device *ndev, *napi_ndev = NULL;
1373 	struct device *dev = cpsw->dev;
1374 	struct cpsw_priv *priv;
1375 	int ret = 0, i = 0;
1376 
1377 	for (i = 0; i < cpsw->data.slaves; i++) {
1378 		struct cpsw_slave_data *slave_data = &data->slave_data[i];
1379 
1380 		if (slave_data->disabled)
1381 			continue;
1382 
1383 		ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
1384 					       CPSW_MAX_QUEUES,
1385 					       CPSW_MAX_QUEUES);
1386 		if (!ndev) {
1387 			dev_err(dev, "error allocating net_device\n");
1388 			return -ENOMEM;
1389 		}
1390 
1391 		priv = netdev_priv(ndev);
1392 		priv->cpsw = cpsw;
1393 		priv->ndev = ndev;
1394 		priv->dev  = dev;
1395 		priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1396 		priv->emac_port = i + 1;
1397 
1398 		if (is_valid_ether_addr(slave_data->mac_addr)) {
1399 			ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1400 			dev_info(cpsw->dev, "Detected MACID = %pM\n",
1401 				 priv->mac_addr);
1402 		} else {
1403 			eth_random_addr(slave_data->mac_addr);
1404 			dev_info(cpsw->dev, "Random MACID = %pM\n",
1405 				 priv->mac_addr);
1406 		}
1407 		ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
1408 		ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1409 
1410 		cpsw->slaves[i].ndev = ndev;
1411 
1412 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
1413 				  NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
1414 
1415 		ndev->netdev_ops = &cpsw_netdev_ops;
1416 		ndev->ethtool_ops = &cpsw_ethtool_ops;
1417 		SET_NETDEV_DEV(ndev, dev);
1418 
1419 		if (!napi_ndev) {
1420 			/* The CPSW host port CPDMA interface is shared
1421 			 * between ports, and only one TX IRQ and one RX IRQ
1422 			 * are available for all possible TX and RX channels
1423 			 * respectively.
1424 			 */
1425 			netif_napi_add(ndev, &cpsw->napi_rx,
1426 				       cpsw->quirk_irq ?
1427 				       cpsw_rx_poll : cpsw_rx_mq_poll,
1428 				       CPSW_POLL_WEIGHT);
1429 			netif_tx_napi_add(ndev, &cpsw->napi_tx,
1430 					  cpsw->quirk_irq ?
1431 					  cpsw_tx_poll : cpsw_tx_mq_poll,
1432 					  CPSW_POLL_WEIGHT);
1433 		}
1434 
1435 		napi_ndev = ndev;
1436 	}
1437 
1438 	return ret;
1439 }
1440 
1441 static void cpsw_unregister_ports(struct cpsw_common *cpsw)
1442 {
1443 	int i = 0;
1444 
1445 	for (i = 0; i < cpsw->data.slaves; i++) {
1446 		if (!cpsw->slaves[i].ndev)
1447 			continue;
1448 
1449 		unregister_netdev(cpsw->slaves[i].ndev);
1450 	}
1451 }
1452 
1453 static int cpsw_register_ports(struct cpsw_common *cpsw)
1454 {
1455 	int ret = 0, i = 0;
1456 
1457 	for (i = 0; i < cpsw->data.slaves; i++) {
1458 		if (!cpsw->slaves[i].ndev)
1459 			continue;
1460 
1461 		/* register the network device */
1462 		ret = register_netdev(cpsw->slaves[i].ndev);
1463 		if (ret) {
1464 			dev_err(cpsw->dev,
1465 				"cpsw: err registering net device%d\n", i);
1466 			cpsw->slaves[i].ndev = NULL;
1467 			break;
1468 		}
1469 	}
1470 
1471 	if (ret)
1472 		cpsw_unregister_ports(cpsw);
1473 	return ret;
1474 }
1475 
1476 bool cpsw_port_dev_check(const struct net_device *ndev)
1477 {
1478 	if (ndev->netdev_ops == &cpsw_netdev_ops) {
1479 		struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1480 
1481 		return !cpsw->data.dual_emac;
1482 	}
1483 
1484 	return false;
1485 }
1486 
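/* skb->offload_fwd_mark tells the software bridge that the hardware has
 * already forwarded a frame, so the bridge must not flood it again. The
 * mark is only set when both ports are members of the same bridge and the
 * ALE is not in bypass mode.
 */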
1487 static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
1488 {
1489 	int set_val = 0;
1490 	int i;
1491 
1492 	if (!cpsw->ale_bypass &&
1493 	    (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
1494 		set_val = 1;
1495 
1496 	dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
1497 
1498 	for (i = 0; i < cpsw->data.slaves; i++) {
1499 		struct net_device *sl_ndev = cpsw->slaves[i].ndev;
1500 		struct cpsw_priv *priv = netdev_priv(sl_ndev);
1501 
1502 		priv->offload_fwd_mark = set_val;
1503 	}
1504 }
1505 
1506 static int cpsw_netdevice_port_link(struct net_device *ndev,
1507 				    struct net_device *br_ndev)
1508 {
1509 	struct cpsw_priv *priv = netdev_priv(ndev);
1510 	struct cpsw_common *cpsw = priv->cpsw;
1511 
1512 	if (!cpsw->br_members) {
1513 		cpsw->hw_bridge_dev = br_ndev;
1514 	} else {
1515 		/* This is adding the port to a second bridge, which is
1516 		 * unsupported
1517 		 */
1518 		if (cpsw->hw_bridge_dev != br_ndev)
1519 			return -EOPNOTSUPP;
1520 	}
1521 
1522 	cpsw->br_members |= BIT(priv->emac_port);
1523 
1524 	cpsw_port_offload_fwd_mark_update(cpsw);
1525 
1526 	return NOTIFY_DONE;
1527 }
1528 
1529 static void cpsw_netdevice_port_unlink(struct net_device *ndev)
1530 {
1531 	struct cpsw_priv *priv = netdev_priv(ndev);
1532 	struct cpsw_common *cpsw = priv->cpsw;
1533 
1534 	cpsw->br_members &= ~BIT(priv->emac_port);
1535 
1536 	cpsw_port_offload_fwd_mark_update(cpsw);
1537 
1538 	if (!cpsw->br_members)
1539 		cpsw->hw_bridge_dev = NULL;
1540 }
1541 
1542 /* netdev notifier */
1543 static int cpsw_netdevice_event(struct notifier_block *unused,
1544 				unsigned long event, void *ptr)
1545 {
1546 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1547 	struct netdev_notifier_changeupper_info *info;
1548 	int ret = NOTIFY_DONE;
1549 
1550 	if (!cpsw_port_dev_check(ndev))
1551 		return NOTIFY_DONE;
1552 
1553 	switch (event) {
1554 	case NETDEV_CHANGEUPPER:
1555 		info = ptr;
1556 
1557 		if (netif_is_bridge_master(info->upper_dev)) {
1558 			if (info->linking)
1559 				ret = cpsw_netdevice_port_link(ndev,
1560 							       info->upper_dev);
1561 			else
1562 				cpsw_netdevice_port_unlink(ndev);
1563 		}
1564 		break;
1565 	default:
1566 		return NOTIFY_DONE;
1567 	}
1568 
1569 	return notifier_from_errno(ret);
1570 }
1571 
1572 static struct notifier_block cpsw_netdevice_nb __read_mostly = {
1573 	.notifier_call = cpsw_netdevice_event,
1574 };
1575 
1576 static int cpsw_register_notifiers(struct cpsw_common *cpsw)
1577 {
1578 	int ret = 0;
1579 
1580 	ret = register_netdevice_notifier(&cpsw_netdevice_nb);
1581 	if (ret) {
1582 		dev_err(cpsw->dev, "can't register netdevice notifier\n");
1583 		return ret;
1584 	}
1585 
1586 	ret = cpsw_switchdev_register_notifiers(cpsw);
1587 	if (ret)
1588 		unregister_netdevice_notifier(&cpsw_netdevice_nb);
1589 
1590 	return ret;
1591 }
1592 
1593 static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
1594 {
1595 	cpsw_switchdev_unregister_notifiers(cpsw);
1596 	unregister_netdevice_notifier(&cpsw_netdevice_nb);
1597 }
1598 
1599 static const struct devlink_ops cpsw_devlink_ops = {
1600 };
1601 
1602 static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
1603 				   struct devlink_param_gset_ctx *ctx)
1604 {
1605 	struct cpsw_devlink *dl_priv = devlink_priv(dl);
1606 	struct cpsw_common *cpsw = dl_priv->cpsw;
1607 
1608 	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1609 
1610 	if (id != CPSW_DL_PARAM_SWITCH_MODE)
1611 		return  -EOPNOTSUPP;
1612 
1613 	ctx->val.vbool = !cpsw->data.dual_emac;
1614 
1615 	return 0;
1616 }
1617 
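/* devlink "switch_mode" set handler. If all ports are down, only the
 * dual_emac flag and the per-port VLANs are flipped. Otherwise the ALE is
 * put into bypass, its table is cleared, the host port and the per-port
 * default ALE entries are rebuilt for the new mode, and bypass is lifted
 * again.
 */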
1618 static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
1619 				   struct devlink_param_gset_ctx *ctx)
1620 {
1621 	struct cpsw_devlink *dl_priv = devlink_priv(dl);
1622 	struct cpsw_common *cpsw = dl_priv->cpsw;
1623 	int vlan = cpsw->data.default_vlan;
1624 	bool switch_en = ctx->val.vbool;
1625 	bool if_running = false;
1626 	int i;
1627 
1628 	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1629 
1630 	if (id != CPSW_DL_PARAM_SWITCH_MODE)
1631 		return  -EOPNOTSUPP;
1632 
1633 	if (switch_en == !cpsw->data.dual_emac)
1634 		return 0;
1635 
1636 	if (!switch_en && cpsw->br_members) {
1637 		dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
1638 		return -EINVAL;
1639 	}
1640 
1641 	rtnl_lock();
1642 
1643 	for (i = 0; i < cpsw->data.slaves; i++) {
1644 		struct cpsw_slave *slave = &cpsw->slaves[i];
1645 		struct net_device *sl_ndev = slave->ndev;
1646 
1647 		if (!sl_ndev || !netif_running(sl_ndev))
1648 			continue;
1649 
1650 		if_running = true;
1651 	}
1652 
1653 	if (!if_running) {
1654 		/* all ndevs are down */
1655 		cpsw->data.dual_emac = !switch_en;
1656 		for (i = 0; i < cpsw->data.slaves; i++) {
1657 			struct cpsw_slave *slave = &cpsw->slaves[i];
1658 			struct net_device *sl_ndev = slave->ndev;
1659 
1660 			if (!sl_ndev)
1661 				continue;
1662 
1663 			if (switch_en)
1664 				vlan = cpsw->data.default_vlan;
1665 			else
1666 				vlan = slave->data->dual_emac_res_vlan;
1667 			slave->port_vlan = vlan;
1668 		}
1669 		goto exit;
1670 	}
1671 
1672 	if (switch_en) {
1673 		dev_info(cpsw->dev, "Enable switch mode\n");
1674 
1675 		/* enable bypass - no forwarding; all traffic goes to Host */
1676 		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1677 
1678 		/* clean up ALE table */
1679 		cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1680 		cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1681 
1682 		cpsw_init_host_port_switch(cpsw);
1683 
1684 		for (i = 0; i < cpsw->data.slaves; i++) {
1685 			struct cpsw_slave *slave = &cpsw->slaves[i];
1686 			struct net_device *sl_ndev = slave->ndev;
1687 			struct cpsw_priv *priv;
1688 
1689 			if (!sl_ndev)
1690 				continue;
1691 
1692 			priv = netdev_priv(sl_ndev);
1693 			slave->port_vlan = vlan;
1694 			if (netif_running(sl_ndev))
1695 				cpsw_port_add_switch_def_ale_entries(priv,
1696 								     slave);
1697 		}
1698 
1699 		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1700 		cpsw->data.dual_emac = false;
1701 	} else {
1702 		dev_info(cpsw->dev, "Disable switch mode\n");
1703 
1704 		/* enable bypass - no forwarding; all traffic goes to Host */
1705 		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1706 
1707 		cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1708 		cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1709 
1710 		cpsw_init_host_port_dual_mac(cpsw);
1711 
1712 		for (i = 0; i < cpsw->data.slaves; i++) {
1713 			struct cpsw_slave *slave = &cpsw->slaves[i];
1714 			struct net_device *sl_ndev = slave->ndev;
1715 			struct cpsw_priv *priv;
1716 
1717 			if (!sl_ndev)
1718 				continue;
1719 
1720 			priv = netdev_priv(slave->ndev);
1721 			slave->port_vlan = slave->data->dual_emac_res_vlan;
1722 			cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
1723 		}
1724 
1725 		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1726 		cpsw->data.dual_emac = true;
1727 	}
1728 exit:
1729 	rtnl_unlock();
1730 
1731 	return 0;
1732 }
1733 
1734 static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
1735 				struct devlink_param_gset_ctx *ctx)
1736 {
1737 	struct cpsw_devlink *dl_priv = devlink_priv(dl);
1738 	struct cpsw_common *cpsw = dl_priv->cpsw;
1739 
1740 	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1741 
1742 	switch (id) {
1743 	case CPSW_DL_PARAM_ALE_BYPASS:
1744 		ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
1745 		break;
1746 	default:
1747 		return -EOPNOTSUPP;
1748 	}
1749 
1750 	return 0;
1751 }
1752 
1753 static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
1754 				struct devlink_param_gset_ctx *ctx)
1755 {
1756 	struct cpsw_devlink *dl_priv = devlink_priv(dl);
1757 	struct cpsw_common *cpsw = dl_priv->cpsw;
1758 	int ret = -EOPNOTSUPP;
1759 
1760 	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1761 
1762 	switch (id) {
1763 	case CPSW_DL_PARAM_ALE_BYPASS:
1764 		ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
1765 					   ctx->val.vbool);
1766 		if (!ret) {
1767 			cpsw->ale_bypass = ctx->val.vbool;
1768 			cpsw_port_offload_fwd_mark_update(cpsw);
1769 		}
1770 		break;
1771 	default:
1772 		return -EOPNOTSUPP;
1773 	}
1774 
1775 	return 0;
1776 }
1777 
1778 static const struct devlink_param cpsw_devlink_params[] = {
1779 	DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
1780 			     "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
1781 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1782 			     cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
1783 			     NULL),
1784 	DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
1785 			     "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
1786 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1787 			     cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
1788 };
1789 
1790 static int cpsw_register_devlink(struct cpsw_common *cpsw)
1791 {
1792 	struct device *dev = cpsw->dev;
1793 	struct cpsw_devlink *dl_priv;
1794 	int ret = 0;
1795 
1796 	cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv));
1797 	if (!cpsw->devlink)
1798 		return -ENOMEM;
1799 
1800 	dl_priv = devlink_priv(cpsw->devlink);
1801 	dl_priv->cpsw = cpsw;
1802 
1803 	ret = devlink_register(cpsw->devlink, dev);
1804 	if (ret) {
1805 		dev_err(dev, "DL reg fail ret:%d\n", ret);
1806 		goto dl_free;
1807 	}
1808 
1809 	ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
1810 				      ARRAY_SIZE(cpsw_devlink_params));
1811 	if (ret) {
1812 		dev_err(dev, "DL params reg fail ret:%d\n", ret);
1813 		goto dl_unreg;
1814 	}
1815 
1816 	devlink_params_publish(cpsw->devlink);
1817 	return ret;
1818 
1819 dl_unreg:
1820 	devlink_unregister(cpsw->devlink);
1821 dl_free:
1822 	devlink_free(cpsw->devlink);
1823 	return ret;
1824 }
1825 
1826 static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
1827 {
1828 	devlink_params_unpublish(cpsw->devlink);
1829 	devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
1830 				  ARRAY_SIZE(cpsw_devlink_params));
1831 	devlink_unregister(cpsw->devlink);
1832 	devlink_free(cpsw->devlink);
1833 }
1834 
1835 static const struct of_device_id cpsw_of_mtable[] = {
1836 	{ .compatible = "ti,cpsw-switch"},
1837 	{ .compatible = "ti,am335x-cpsw-switch"},
1838 	{ .compatible = "ti,am4372-cpsw-switch"},
1839 	{ .compatible = "ti,dra7-cpsw-switch"},
1840 	{ /* sentinel */ },
1841 };
1842 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
1843 
1844 static const struct soc_device_attribute cpsw_soc_devices[] = {
1845 	{ .family = "AM33xx", .revision = "ES1.0"},
1846 	{ /* sentinel */ }
1847 };
1848 
1849 static int cpsw_probe(struct platform_device *pdev)
1850 {
1851 	const struct soc_device_attribute *soc;
1852 	struct device *dev = &pdev->dev;
1853 	struct cpsw_common *cpsw;
1854 	struct resource *ss_res;
1855 	struct gpio_descs *mode;
1856 	void __iomem *ss_regs;
1857 	int ret = 0, ch;
1858 	struct clk *clk;
1859 	int irq;
1860 
1861 	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
1862 	if (!cpsw)
1863 		return -ENOMEM;
1864 
1865 	cpsw_slave_index = cpsw_slave_index_priv;
1866 
1867 	cpsw->dev = dev;
1868 
1869 	cpsw->slaves = devm_kcalloc(dev,
1870 				    CPSW_SLAVE_PORTS_NUM,
1871 				    sizeof(struct cpsw_slave),
1872 				    GFP_KERNEL);
1873 	if (!cpsw->slaves)
1874 		return -ENOMEM;
1875 
1876 	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
1877 	if (IS_ERR(mode)) {
1878 		ret = PTR_ERR(mode);
1879 		dev_err(dev, "gpio request failed, ret %d\n", ret);
1880 		return ret;
1881 	}
1882 
1883 	clk = devm_clk_get(dev, "fck");
1884 	if (IS_ERR(clk)) {
1885 		ret = PTR_ERR(clk);
1886 		dev_err(dev, "fck is not found %d\n", ret);
1887 		return ret;
1888 	}
1889 	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
1890 
1891 	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1892 	ss_regs = devm_ioremap_resource(dev, ss_res);
1893 	if (IS_ERR(ss_regs)) {
1894 		ret = PTR_ERR(ss_regs);
1895 		return ret;
1896 	}
1897 	cpsw->regs = ss_regs;
1898 
1899 	irq = platform_get_irq_byname(pdev, "rx");
1900 	if (irq < 0)
1901 		return irq;
1902 	cpsw->irqs_table[0] = irq;
1903 
1904 	irq = platform_get_irq_byname(pdev, "tx");
1905 	if (irq < 0)
1906 		return irq;
1907 	cpsw->irqs_table[1] = irq;
1908 
1909 	irq = platform_get_irq_byname(pdev, "misc");
1910 	if (irq <= 0)
1911 		return irq;
1912 	cpsw->misc_irq = irq;
1913 
1914 	platform_set_drvdata(pdev, cpsw);
1915 	/* This may be required here for child devices. */
1916 	pm_runtime_enable(dev);
1917 
1918 	/* Need to enable clocks with the runtime PM API to access module
1919 	 * registers
1920 	 */
1921 	ret = pm_runtime_get_sync(dev);
1922 	if (ret < 0) {
1923 		pm_runtime_put_noidle(dev);
1924 		pm_runtime_disable(dev);
1925 		return ret;
1926 	}
1927 
1928 	ret = cpsw_probe_dt(cpsw);
1929 	if (ret)
1930 		goto clean_dt_ret;
1931 
1932 	soc = soc_device_match(cpsw_soc_devices);
1933 	if (soc)
1934 		cpsw->quirk_irq = true;
1935 
1936 	cpsw->rx_packet_max = rx_packet_max;
1937 	cpsw->descs_pool_size = descs_pool_size;
1938 	eth_random_addr(cpsw->base_mac);
1939 
1940 	ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
1941 			       (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
1942 			       descs_pool_size);
1943 	if (ret)
1944 		goto clean_dt_ret;
1945 
1946 	cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
1947 			ss_regs + CPSW1_WR_OFFSET :
1948 			ss_regs + CPSW2_WR_OFFSET;
1949 
1950 	ch = cpsw->quirk_irq ? 0 : 7;
1951 	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
1952 	if (IS_ERR(cpsw->txv[0].ch)) {
1953 		dev_err(dev, "error initializing tx dma channel\n");
1954 		ret = PTR_ERR(cpsw->txv[0].ch);
1955 		goto clean_cpts;
1956 	}
1957 
1958 	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
1959 	if (IS_ERR(cpsw->rxv[0].ch)) {
1960 		dev_err(dev, "error initializing rx dma channel\n");
1961 		ret = PTR_ERR(cpsw->rxv[0].ch);
1962 		goto clean_cpts;
1963 	}
1964 	cpsw_split_res(cpsw);
1965 
1966 	/* setup netdevs */
1967 	ret = cpsw_create_ports(cpsw);
1968 	if (ret)
1969 		goto clean_unregister_netdev;
1970 
1971 	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
1972 	 * MISC IRQs which are always kept disabled with this driver so
1973 	 * we will not request them.
1974 	 *
1975 	 * If anyone wants to implement support for those, make sure to
1976 	 * first request and append them to irqs_table array.
1977 	 */
1978 
1979 	ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
1980 			       0, dev_name(dev), cpsw);
1981 	if (ret < 0) {
1982 		dev_err(dev, "error attaching irq (%d)\n", ret);
1983 		goto clean_unregister_netdev;
1984 	}
1985 
1986 	ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
1987 			       0, dev_name(dev), cpsw);
1988 	if (ret < 0) {
1989 		dev_err(dev, "error attaching irq (%d)\n", ret);
1990 		goto clean_unregister_netdev;
1991 	}
1992 
1993 	if (!cpsw->cpts)
1994 		goto skip_cpts;
1995 
1996 	ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
1997 			       0, dev_name(&pdev->dev), cpsw);
1998 	if (ret < 0) {
1999 		dev_err(dev, "error attaching misc irq (%d)\n", ret);
2000 		goto clean_unregister_netdev;
2001 	}
2002 
2003 	/* Enable misc CPTS evnt_pend IRQ */
2004 	cpts_set_irqpoll(cpsw->cpts, false);
2005 
2006 skip_cpts:
2007 	ret = cpsw_register_notifiers(cpsw);
2008 	if (ret)
2009 		goto clean_unregister_netdev;
2010 
2011 	ret = cpsw_register_devlink(cpsw);
2012 	if (ret)
2013 		goto clean_unregister_notifiers;
2014 
2015 	ret = cpsw_register_ports(cpsw);
2016 	if (ret)
2017 		goto clean_unregister_notifiers;
2018 
2019 	dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
2020 		   &ss_res->start, descs_pool_size,
2021 		   cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
2022 		   CPSW_MINOR_VERSION(cpsw->version),
2023 		   CPSW_RTL_VERSION(cpsw->version));
2024 
2025 	pm_runtime_put(dev);
2026 
2027 	return 0;
2028 
2029 clean_unregister_notifiers:
2030 	cpsw_unregister_notifiers(cpsw);
2031 clean_unregister_netdev:
2032 	cpsw_unregister_ports(cpsw);
2033 clean_cpts:
2034 	cpts_release(cpsw->cpts);
2035 	cpdma_ctlr_destroy(cpsw->dma);
2036 clean_dt_ret:
2037 	cpsw_remove_dt(cpsw);
2038 	pm_runtime_put_sync(dev);
2039 	pm_runtime_disable(dev);
2040 	return ret;
2041 }
2042 
2043 static int cpsw_remove(struct platform_device *pdev)
2044 {
2045 	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2046 	int ret;
2047 
2048 	ret = pm_runtime_get_sync(&pdev->dev);
2049 	if (ret < 0) {
2050 		pm_runtime_put_noidle(&pdev->dev);
2051 		return ret;
2052 	}
2053 
2054 	cpsw_unregister_notifiers(cpsw);
2055 	cpsw_unregister_devlink(cpsw);
2056 	cpsw_unregister_ports(cpsw);
2057 
2058 	cpts_release(cpsw->cpts);
2059 	cpdma_ctlr_destroy(cpsw->dma);
2060 	cpsw_remove_dt(cpsw);
2061 	pm_runtime_put_sync(&pdev->dev);
2062 	pm_runtime_disable(&pdev->dev);
2063 	return 0;
2064 }
2065 
2066 static int __maybe_unused cpsw_suspend(struct device *dev)
2067 {
2068 	struct cpsw_common *cpsw = dev_get_drvdata(dev);
2069 	int i;
2070 
2071 	rtnl_lock();
2072 
2073 	for (i = 0; i < cpsw->data.slaves; i++) {
2074 		struct net_device *ndev = cpsw->slaves[i].ndev;
2075 
2076 		if (!(ndev && netif_running(ndev)))
2077 			continue;
2078 
2079 		cpsw_ndo_stop(ndev);
2080 	}
2081 
2082 	rtnl_unlock();
2083 
2084 	/* Select sleep pin state */
2085 	pinctrl_pm_select_sleep_state(dev);
2086 
2087 	return 0;
2088 }
2089 
2090 static int __maybe_unused cpsw_resume(struct device *dev)
2091 {
2092 	struct cpsw_common *cpsw = dev_get_drvdata(dev);
2093 	int i;
2094 
2095 	/* Select default pin state */
2096 	pinctrl_pm_select_default_state(dev);
2097 
2098 	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
2099 	rtnl_lock();
2100 
2101 	for (i = 0; i < cpsw->data.slaves; i++) {
2102 		struct net_device *ndev = cpsw->slaves[i].ndev;
2103 
2104 		if (!(ndev && netif_running(ndev)))
2105 			continue;
2106 
2107 		cpsw_ndo_open(ndev);
2108 	}
2109 
2110 	rtnl_unlock();
2111 
2112 	return 0;
2113 }
2114 
2115 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2116 
2117 static struct platform_driver cpsw_driver = {
2118 	.driver = {
2119 		.name	 = "cpsw-switch",
2120 		.pm	 = &cpsw_pm_ops,
2121 		.of_match_table = cpsw_of_mtable,
2122 	},
2123 	.probe = cpsw_probe,
2124 	.remove = cpsw_remove,
2125 };
2126 
2127 module_platform_driver(cpsw_driver);
2128 
2129 MODULE_LICENSE("GPL");
2130 MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
2131