/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define ALE_ALL_PORTS		0x7

#define CPSW_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
#define CPSW_MINOR_VERSION(reg)		((reg) & 0xff)
#define CPSW_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)

#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c
#define CPSW_VERSION_3		0x19010f
#define CPSW_VERSION_4		0x190112

#define HOST_PORT_NUM		0
#define CPSW_ALE_PORTS_NUM	3
#define SLIVER_SIZE		0x40

#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
#define CPSW1_HW_STATS		0x400
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
#define CPSW2_HW_STATS		0x900
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define CPSW_POLL_WEIGHT	64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE		4
#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
				 ETH_FCS_LEN +\
				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_RX_VLAN_ENCAP	BIT(2)
#define CPSW_ALE_VLAN_AWARE	1

#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)

#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)
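
/* The interrupt pacer counts interrupts per millisecond (rx_imax/tx_imax),
 * so with the 2..63 interrupts/ms bounds above the achievable coalescing
 * interval ranges from roughly 16us to 500us; see cpsw_set_coalesce().
 */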

#define cpsw_slave_index(cpsw, priv)				\
		((cpsw->data.dual_emac) ? priv->emac_port :	\
		cpsw->data.active_slave)
#define IRQ_NUM			2
#define CPSW_MAX_QUEUES		8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_FIFO_QUEUE_TYPE_SHIFT	16
#define CPSW_FIFO_SHAPE_EN_SHIFT	16
#define CPSW_FIFO_RATE_EN_SHIFT		20
#define CPSW_TC_NUM			4
#define CPSW_FIFO_SHAPERS_NUM		(CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK			0x7f

#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT	29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK		GENMASK(2, 0)
#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT	16
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT	8
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK	GENMASK(1, 0)
enum {
	CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
};
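
/* When CPSW_RX_VLAN_ENCAP is enabled, each received frame is preceded in
 * the DMA buffer by a 32-bit info word: priority in bits [31:29], VLAN id
 * in bits [27:16] and the packet type above in bits [9:8]; it is parsed
 * and stripped in cpsw_rx_vlan_encap().
 */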

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
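
/* A usage sketch (values illustrative): when cpsw is built as a module,
 * "modprobe cpsw descs_pool_size=512" doubles the default CPPI descriptor
 * pool; when built in, the same can be requested with
 * "cpsw.descs_pool_size=512" on the kernel command line.
 */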

struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
	u32	mem_allign1[8];
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;
	u32	tx_imax;
};

struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};

/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */

#define CTRL_V2_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)


#define CTRL_V3_TS_BITS \
	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
	 TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

#define CPSW_MAX_BLKS_TX		15
#define CPSW_MAX_BLKS_TX_SHIFT		4
#define CPSW_MAX_BLKS_RX		5

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_slave_data {
	struct device_node *phy_node;
	char		phy_id[MII_BUS_ID_SIZE];
	int		phy_if;
	u8		mac_addr[ETH_ALEN];
	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
	struct phy	*ifphy;
};

struct cpsw_platform_data {
	struct cpsw_slave_data	*slave_data;
	u32	ss_reg_ofs;	/* Subsystem control register offset */
	u32	channels;	/* number of cpdma channels (symmetric) */
	u32	slaves;		/* number of slave cpgmac ports */
	u32	active_slave;	/* time stamping, ethtool and SIOCGMIIPHY slave */
	u32	ale_entries;	/* ale table size */
	u32	bd_ram_size;	/* buffer descriptor ram size */
	u32	mac_control;	/* Mac control register */
	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
	bool	dual_emac;	/* Enable Dual EMAC mode */
};

struct cpsw_slave {
	void __iomem			*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;
};

static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return readl_relaxed(slave->regs + offset);
}

static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	writel_relaxed(val, slave->regs + offset);
}

struct cpsw_vector {
	struct cpdma_chan *ch;
	int budget;
};

struct cpsw_common {
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct napi_struct		napi_rx;
	struct napi_struct		napi_tx;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	u8 __iomem			*hw_stats;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				version;
	u32				coal_intvl;
	u32				bus_freq_mhz;
	int				rx_packet_max;
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpsw_vector		txv[CPSW_MAX_QUEUES];
	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
	struct cpsw_ale			*ale;
	bool				quirk_irq;
	bool				rx_irq_disabled;
	bool				tx_irq_disabled;
	u32				irqs_table[IRQ_NUM];
	struct cpts			*cpts;
	int				rx_ch_num, tx_ch_num;
	int				speed;
	int				usage_count;
};

struct cpsw_priv {
	struct net_device		*ndev;
	struct device			*dev;
	u32				msg_enable;
	u8				mac_addr[ETH_ALEN];
	bool				rx_pause;
	bool				tx_pause;
	bool				mqprio_hw;
	int				fifo_bw[CPSW_TC_NUM];
	int				shp_cfg_speed;
	int				tx_ts_enabled;
	int				rx_ts_enabled;
	u32				emac_port;
	struct cpsw_common		*cpsw;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS,				\
				FIELD_SIZEOF(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
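/* napi_to_cpsw() relies on the macro argument name also naming the
 * struct cpsw_common member: called with napi_rx or napi_tx, the argument
 * is substituted into the member slot of container_of() as well.
 */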
#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

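/* ALE/CPSW port numbering: the host port is 0 and slave N is
 * port N + 1, hence the +1 below.
 */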
static inline int cpsw_get_slave_port(u32 slave_num)
{
	return slave_num + 1;
}

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface is
		 * common to both interfaces, as they share the same
		 * hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports
			 * (host is port 0 and slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports
			 * (host is port 0 and slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

struct addr_sync_ctx {
	struct net_device *ndev;
	const u8 *addr;		/* address to be synched */
	int consumed;		/* number of address instances */
	int flush;		/* flush flag */
};

/**
 * cpsw_set_mc - add a multicast entry to the ALE table, or delete it
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
		return;
	}

	/* Disable promiscuous mode */
	cpsw_set_promiscious(ndev, false);

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

static void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue	*txq;
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan	*ch;
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	int			ret = 0, port;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv	*priv;

	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet was received for an interface which
			 * is already down while the other interface is
			 * still up and running. Instead of freeing it,
			 * which would shrink the number of rx descriptors
			 * available to the DMA engine, requeue the skb
			 * back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
		skb_put(skb, len);
		if (status & CPDMA_RX_VLAN_ENCAP)
			cpsw_rx_vlan_encap(skb);
		priv = netdev_priv(ndev);
		if (priv->rx_ts_enabled)
			cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		return;
	}

	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}

static void cpsw_split_res(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 consumed_rate = 0, biggest_rate = 0;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, biggest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		biggest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link
		 * speed, split proportionally according to the next
		 * potential max speed; the scaling is applied twice on
		 * purpose, so the configured rates may exceed the current
		 * link speed by up to two decades
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		biggest_rate = (max_rate - consumed_rate) /
			       (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > biggest_rate) {
				biggest_rate_ch = i;
				biggest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!biggest_rate_ch)
				biggest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[biggest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

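/* With quirk_irq set, the hard IRQ handlers below also mask the line at
 * the interrupt controller; it stays masked until the matching NAPI poll
 * (cpsw_tx_poll()/cpsw_rx_poll()) completes and re-enables it.
 */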
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common	*cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = cpsw->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN	*/
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);

		if (priv->tx_pause)
			mac_control |= BIT(4);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		writel_relaxed(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	struct cpsw_common	*cpsw = priv->cpsw;
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(ndev);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static int cpsw_get_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}

static int cpsw_set_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

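	/* Program the pacer in interrupts per millisecond; e.g. a 250us
	 * interval with addnl_dvdr == 1 yields 1000 / 250 = 4 interrupts/ms.
	 */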
	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}
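
/* A usage sketch (interface name illustrative): the handler above is
 * reached via the standard ethtool coalescing API, e.g.
 * "ethtool -C eth0 rx-usecs 250", which arrives here as
 * coal->rx_coalesce_usecs == 250 and paces both rx and tx interrupts.
 */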

static int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	int ch_stats_len;
	int line;
	int i;

	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
	for (i = 0; i < ch_stats_len; i++) {
		line = i % CPSW_STATS_CH_LEN;
		snprintf(*p, ETH_GSTRING_LEN,
			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
			 (long)(i / CPSW_STATS_CH_LEN),
			 cpsw_gstrings_ch_stats[line].stat_string);
		*p += ETH_GSTRING_LEN;
	}
}

static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}

static void cpsw_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* Collect CPSW hardware statistics, then Davinci CPDMA stats
	 * for each Rx and Tx channel
	 */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}

static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
					struct sk_buff *skb,
					struct cpdma_chan *txch)
{
	struct cpsw_common *cpsw = priv->cpsw;

	skb_tx_timestamp(skb);
	return cpdma_chan_submit(txch, skb, skb->data, skb->len,
				 priv->emac_port + cpsw->data.dual_emac);
}

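/* In dual EMAC mode each slave port gets its own reserved port VLAN:
 * the entries below allow forwarding only between the slave port and
 * the host port, keeping the two EMACs isolated from each other.
 */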
static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}

static void soft_reset_slave(struct cpsw_slave *slave)
{
	char name[32];

	snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
	soft_reset(name, &slave->sliver->soft_reset);
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	soft_reset_slave(slave);

	/* setup priority mapping */
	writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	if (!IS_ERR(slave->data->ifphy))
		phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
				 slave->data->phy_if);
	else
		cpsw_phy_sel(cpsw->dev, slave->phy->interface,
			     slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}

static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct sk_buff *skb;
	int ch_buf_num;
	int ch, i, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
							  cpsw->rx_packet_max,
							  GFP_KERNEL);
			if (!skb) {
				cpsw_err(priv, ifup, "cannot allocate skb\n");
				return -ENOMEM;
			}

			skb_set_queue_mapping(skb, ch);
			ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
						skb->data, skb_tailroom(skb),
						0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit skb to channel %d rx, error %d\n",
					 ch, ret);
				kfree_skb(skb);
				return ret;
			}
			kmemleak_not_leak(skb);
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	soft_reset_slave(slave);
}

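/* Map a traffic class to a FIFO shaper: the last (lowest priority) tc
 * uses FIFO 0, which cannot be shaped; the remaining tcs map to
 * FIFOs CPSW_FIFO_SHAPERS_NUM..1 in descending order.
 */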
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for highest fifos linearly
	 * and fifo bw must be no more than the interface can allow
	 */
1775 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1776 	send_pct = slave_read(slave, SEND_PERCENT);
1777 	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
1778 		if (!bw) {
1779 			if (i >= fifo || !priv->fifo_bw[i])
1780 				continue;
1781 
1782 			dev_warn(priv->dev, "Prev FIFO%d is shaped\n", i);
1783 			continue;
1784 		}
1785 
1786 		if (!priv->fifo_bw[i] && i > fifo) {
1787 			dev_err(priv->dev, "Upper FIFO%d is not shaped\n", i);
1788 			return -EINVAL;
1789 		}
1790 
1791 		shift = (i - 1) * 8;
1792 		if (i == fifo) {
1793 			send_pct &= ~(CPSW_PCT_MASK << shift);
1794 			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
1795 			if (!val)
1796 				val = 1;
1797 
1798 			send_pct |= val << shift;
1799 			pct += val;
1800 			continue;
1801 		}
1802 
1803 		if (priv->fifo_bw[i])
1804 			pct += (send_pct >> shift) & CPSW_PCT_MASK;
1805 	}
1806 
1807 	if (pct >= 100)
1808 		goto err;
1809 
1810 	slave_write(slave, send_pct, SEND_PERCENT);
1811 	priv->fifo_bw[fifo] = bw;
1812 
1813 	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
1814 		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
1815 
1816 	return 0;
1817 err:
1818 	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration\n");
1819 	return -EINVAL;
1820 }
1821 
1822 static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
1823 {
1824 	struct cpsw_common *cpsw = priv->cpsw;
1825 	struct cpsw_slave *slave;
1826 	u32 tx_in_ctl_rg, val;
1827 	int ret;
1828 
1829 	ret = cpsw_set_fifo_bw(priv, fifo, bw);
1830 	if (ret)
1831 		return ret;
1832 
1833 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1834 	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
1835 		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
1836 
1837 	if (!bw)
1838 		cpsw_fifo_shp_on(priv, fifo, bw);
1839 
1840 	val = slave_read(slave, tx_in_ctl_rg);
1841 	if (cpsw_shp_is_off(priv)) {
1842 		/* disable rate limiting on all FIFO queues */
1843 		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
1844 
1845 		/* set type of FIFO queues to normal priority mode */
1846 		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
1847 
1848 		/* set type of FIFO queues to be rate limited */
1849 		if (bw)
1850 			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
1851 		else
1852 			priv->shp_cfg_speed = 0;
1853 	}
1854 
1855 	/* toggle a FIFO rate limited queue */
1856 	if (bw)
1857 		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1858 	else
1859 		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1860 	slave_write(slave, val, tx_in_ctl_rg);
1861 
1862 	/* FIFO transmit shape enable */
1863 	cpsw_fifo_shp_on(priv, fifo, bw);
1864 	return 0;
1865 }
1866 
1867 /* Defaults:
1868  * class A - prio 3
1869  * class B - prio 2
1870  * shaping for class A should be set first
1871  */
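/* A hypothetical tc(8) invocation that would reach this handler via the
 * CBS qdisc offload (interface name and numbers are illustrative only):
 *   tc qdisc replace dev eth0 parent 100:4 cbs \
 *      idleslope 20000 sendslope -980000 hicredit 30 locredit -1470 \
 *      offload 1
 * idleslope is given in kbit/s and becomes 'bw' below.
 */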
1872 static int cpsw_set_cbs(struct net_device *ndev,
1873 			struct tc_cbs_qopt_offload *qopt)
1874 {
1875 	struct cpsw_priv *priv = netdev_priv(ndev);
1876 	struct cpsw_common *cpsw = priv->cpsw;
1877 	struct cpsw_slave *slave;
1878 	int prev_speed = 0;
1879 	int tc, ret, fifo;
1880 	u32 bw = 0;
1881 
1882 	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
1883 
1884 	/* enable channels in backward order: the highest FIFOs must be rate
1885 	 * limited first, matching the CPDMA rate limited channels, which are
1886 	 * also used in backward order. FIFO0 cannot be rate limited.
1887 	 */
1888 	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
1889 	if (!fifo) {
1890 		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
1891 		return -EINVAL;
1892 	}
1893 
1894 	/* do nothing, it's disabled anyway */
1895 	if (!qopt->enable && !priv->fifo_bw[fifo])
1896 		return 0;
1897 
1898 	/* shapers can be set if link speed is known */
1899 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1900 	if (slave->phy && slave->phy->link) {
1901 		if (priv->shp_cfg_speed &&
1902 		    priv->shp_cfg_speed != slave->phy->speed)
1903 			prev_speed = priv->shp_cfg_speed;
1904 
1905 		priv->shp_cfg_speed = slave->phy->speed;
1906 	}
1907 
1908 	if (!priv->shp_cfg_speed) {
1909 		dev_err(priv->dev, "Link speed is not known\n");
1910 		return -EINVAL;
1911 	}
1912 
1913 	ret = pm_runtime_get_sync(cpsw->dev);
1914 	if (ret < 0) {
1915 		pm_runtime_put_noidle(cpsw->dev);
1916 		return ret;
1917 	}
1918 
1919 	bw = qopt->enable ? qopt->idleslope : 0;
1920 	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
1921 	if (ret) {
1922 		priv->shp_cfg_speed = prev_speed;
1923 		prev_speed = 0;
1924 	}
1925 
1926 	if (bw && prev_speed)
1927 		dev_warn(priv->dev,
1928 			 "Speed was changed, CBS shaper speeds are changed!");
1929 
1930 	pm_runtime_put_sync(cpsw->dev);
1931 	return ret;
1932 }
1933 
1934 static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1935 {
1936 	int fifo, bw;
1937 
1938 	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1939 		bw = priv->fifo_bw[fifo];
1940 		if (!bw)
1941 			continue;
1942 
1943 		cpsw_set_fifo_rlimit(priv, fifo, bw);
1944 	}
1945 }
1946 
1947 static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1948 {
1949 	struct cpsw_common *cpsw = priv->cpsw;
1950 	u32 tx_prio_map = 0;
1951 	int i, tc, fifo;
1952 	u32 tx_prio_rg;
1953 
1954 	if (!priv->mqprio_hw)
1955 		return;
1956 
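	/* TX_PRI_MAP packs one 4-bit FIFO number per packet priority:
	 * bits [4 * i + 3 : 4 * i] select the FIFO used for priority i.
	 */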
1957 	for (i = 0; i < 8; i++) {
1958 		tc = netdev_get_prio_tc_map(priv->ndev, i);
1959 		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1960 		tx_prio_map |= fifo << (4 * i);
1961 	}
1962 
1963 	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1964 		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1965 
1966 	slave_write(slave, tx_prio_map, tx_prio_rg);
1967 }
1968 
1969 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
1970 {
1971 	struct cpsw_priv *priv = arg;
1972 
1973 	if (!vdev)
1974 		return 0;
1975 
1976 	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
1977 	return 0;
1978 }
1979 
1980 /* restore resources after port reset */
1981 static void cpsw_restore(struct cpsw_priv *priv)
1982 {
1983 	/* restore vlan configurations */
1984 	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
1985 
1986 	/* restore MQPRIO offload */
1987 	for_each_slave(priv, cpsw_mqprio_resume, priv);
1988 
1989 	/* restore CBS offload */
1990 	for_each_slave(priv, cpsw_cbs_resume, priv);
1991 }
1992 
1993 static int cpsw_ndo_open(struct net_device *ndev)
1994 {
1995 	struct cpsw_priv *priv = netdev_priv(ndev);
1996 	struct cpsw_common *cpsw = priv->cpsw;
1997 	int ret;
1998 	u32 reg;
1999 
2000 	ret = pm_runtime_get_sync(cpsw->dev);
2001 	if (ret < 0) {
2002 		pm_runtime_put_noidle(cpsw->dev);
2003 		return ret;
2004 	}
2005 
2006 	netif_carrier_off(ndev);
2007 
2008 	/* Notify the stack of the actual queue counts. */
2009 	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
2010 	if (ret) {
2011 		dev_err(priv->dev, "cannot set real number of tx queues\n");
2012 		goto err_cleanup;
2013 	}
2014 
2015 	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
2016 	if (ret) {
2017 		dev_err(priv->dev, "cannot set real number of rx queues\n");
2018 		goto err_cleanup;
2019 	}
2020 
2021 	reg = cpsw->version;
2022 
2023 	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
2024 		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
2025 		 CPSW_RTL_VERSION(reg));
2026 
2027 	/* Initialize host and slave ports */
2028 	if (!cpsw->usage_count)
2029 		cpsw_init_host_port(priv);
2030 	for_each_slave(priv, cpsw_slave_open, priv);
2031 
2032 	/* Add default VLAN */
2033 	if (!cpsw->data.dual_emac)
2034 		cpsw_add_default_vlan(priv);
2035 	else
2036 		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
2037 				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
2038 
2039 	/* initialize shared resources for every ndev */
2040 	if (!cpsw->usage_count) {
2041 		/* disable priority elevation */
2042 		writel_relaxed(0, &cpsw->regs->ptype);
2043 
2044 		/* enable statistics collection on all ports */
2045 		writel_relaxed(0x7, &cpsw->regs->stat_port_en);
2046 
2047 		/* Enable internal fifo flow control */
2048 		writel(0x7, &cpsw->regs->flow_control);
2049 
2050 		napi_enable(&cpsw->napi_rx);
2051 		napi_enable(&cpsw->napi_tx);
2052 
2053 		if (cpsw->tx_irq_disabled) {
2054 			cpsw->tx_irq_disabled = false;
2055 			enable_irq(cpsw->irqs_table[1]);
2056 		}
2057 
2058 		if (cpsw->rx_irq_disabled) {
2059 			cpsw->rx_irq_disabled = false;
2060 			enable_irq(cpsw->irqs_table[0]);
2061 		}
2062 
2063 		ret = cpsw_fill_rx_channels(priv);
2064 		if (ret < 0)
2065 			goto err_cleanup;
2066 
2067 		if (cpts_register(cpsw->cpts))
2068 			dev_err(priv->dev, "error registering cpts device\n");
2069 
2070 	}
2071 
2072 	cpsw_restore(priv);
2073 
2074 	/* Enable Interrupt pacing if configured */
2075 	if (cpsw->coal_intvl != 0) {
2076 		struct ethtool_coalesce coal;
2077 
2078 		coal.rx_coalesce_usecs = cpsw->coal_intvl;
2079 		cpsw_set_coalesce(ndev, &coal);
2080 	}
2081 
2082 	cpdma_ctlr_start(cpsw->dma);
2083 	cpsw_intr_enable(cpsw);
2084 	cpsw->usage_count++;
2085 
2086 	return 0;
2087 
2088 err_cleanup:
2089 	cpdma_ctlr_stop(cpsw->dma);
2090 	for_each_slave(priv, cpsw_slave_stop, cpsw);
2091 	pm_runtime_put_sync(cpsw->dev);
2092 	netif_carrier_off(priv->ndev);
2093 	return ret;
2094 }
2095 
2096 static int cpsw_ndo_stop(struct net_device *ndev)
2097 {
2098 	struct cpsw_priv *priv = netdev_priv(ndev);
2099 	struct cpsw_common *cpsw = priv->cpsw;
2100 
2101 	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
2102 	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
2103 	netif_tx_stop_all_queues(priv->ndev);
2104 	netif_carrier_off(priv->ndev);
2105 
2106 	if (cpsw->usage_count <= 1) {
2107 		napi_disable(&cpsw->napi_rx);
2108 		napi_disable(&cpsw->napi_tx);
2109 		cpts_unregister(cpsw->cpts);
2110 		cpsw_intr_disable(cpsw);
2111 		cpdma_ctlr_stop(cpsw->dma);
2112 		cpsw_ale_stop(cpsw->ale);
2113 	}
2114 	for_each_slave(priv, cpsw_slave_stop, cpsw);
2115 
2116 	if (cpsw_need_resplit(cpsw))
2117 		cpsw_split_res(ndev);
2118 
2119 	cpsw->usage_count--;
2120 	pm_runtime_put_sync(cpsw->dev);
2121 	return 0;
2122 }
2123 
2124 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
2125 				       struct net_device *ndev)
2126 {
2127 	struct cpsw_priv *priv = netdev_priv(ndev);
2128 	struct cpsw_common *cpsw = priv->cpsw;
2129 	struct cpts *cpts = cpsw->cpts;
2130 	struct netdev_queue *txq;
2131 	struct cpdma_chan *txch;
2132 	int ret, q_idx;
2133 
2134 	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
2135 		cpsw_err(priv, tx_err, "packet pad failed\n");
2136 		ndev->stats.tx_dropped++;
2137 		return NET_XMIT_DROP;
2138 	}
2139 
2140 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2141 	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
2142 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2143 
2144 	q_idx = skb_get_queue_mapping(skb);
2145 	if (q_idx >= cpsw->tx_ch_num)
2146 		q_idx = q_idx % cpsw->tx_ch_num;
2147 
2148 	txch = cpsw->txv[q_idx].ch;
2149 	txq = netdev_get_tx_queue(ndev, q_idx);
2150 	ret = cpsw_tx_packet_submit(priv, skb, txch);
2151 	if (unlikely(ret != 0)) {
2152 		cpsw_err(priv, tx_err, "desc submit failed\n");
2153 		goto fail;
2154 	}
2155 
2156 	/* If there are no more free tx descriptors, tell the kernel
2157 	 * to stop sending us tx frames.
2158 	 */
2159 	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
2160 		netif_tx_stop_queue(txq);
2161 
2162 		/* Barrier, so that stop_queue is visible to other CPUs */
2163 		smp_mb__after_atomic();
2164 
2165 		if (cpdma_check_free_tx_desc(txch))
2166 			netif_tx_wake_queue(txq);
2167 	}
2168 
2169 	return NETDEV_TX_OK;
2170 fail:
2171 	ndev->stats.tx_dropped++;
2172 	netif_tx_stop_queue(txq);
2173 
2174 	/* Barrier, so that stop_queue is visible to other CPUs */
2175 	smp_mb__after_atomic();
2176 
2177 	if (cpdma_check_free_tx_desc(txch))
2178 		netif_tx_wake_queue(txq);
2179 
2180 	return NETDEV_TX_BUSY;
2181 }
2182 
2183 #if IS_ENABLED(CONFIG_TI_CPTS)
2184 
2185 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
2186 {
2187 	struct cpsw_common *cpsw = priv->cpsw;
2188 	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
2189 	u32 ts_en, seq_id;
2190 
2191 	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
2192 		slave_write(slave, 0, CPSW1_TS_CTL);
2193 		return;
2194 	}
2195 
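	/* sequenceId sits at byte offset 30 in the PTP message header;
	 * the LTYPE field is matched against ETH_P_1588.
	 */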
2196 	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2197 	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
2198 
2199 	if (priv->tx_ts_enabled)
2200 		ts_en |= CPSW_V1_TS_TX_EN;
2201 
2202 	if (priv->rx_ts_enabled)
2203 		ts_en |= CPSW_V1_TS_RX_EN;
2204 
2205 	slave_write(slave, ts_en, CPSW1_TS_CTL);
2206 	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
2207 }
2208 
2209 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
2210 {
2211 	struct cpsw_slave *slave;
2212 	struct cpsw_common *cpsw = priv->cpsw;
2213 	u32 ctrl, mtype;
2214 
2215 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2216 
2217 	ctrl = slave_read(slave, CPSW2_CONTROL);
2218 	switch (cpsw->version) {
2219 	case CPSW_VERSION_2:
2220 		ctrl &= ~CTRL_V2_ALL_TS_MASK;
2221 
2222 		if (priv->tx_ts_enabled)
2223 			ctrl |= CTRL_V2_TX_TS_BITS;
2224 
2225 		if (priv->rx_ts_enabled)
2226 			ctrl |= CTRL_V2_RX_TS_BITS;
2227 		break;
2228 	case CPSW_VERSION_3:
2229 	default:
2230 		ctrl &= ~CTRL_V3_ALL_TS_MASK;
2231 
2232 		if (priv->tx_ts_enabled)
2233 			ctrl |= CTRL_V3_TX_TS_BITS;
2234 
2235 		if (priv->rx_ts_enabled)
2236 			ctrl |= CTRL_V3_RX_TS_BITS;
2237 		break;
2238 	}
2239 
2240 	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
2241 
2242 	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
2243 	slave_write(slave, ctrl, CPSW2_CONTROL);
2244 	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
2245 	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
2246 }
2247 
2248 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2249 {
2250 	struct cpsw_priv *priv = netdev_priv(dev);
2251 	struct hwtstamp_config cfg;
2252 	struct cpsw_common *cpsw = priv->cpsw;
2253 
2254 	if (cpsw->version != CPSW_VERSION_1 &&
2255 	    cpsw->version != CPSW_VERSION_2 &&
2256 	    cpsw->version != CPSW_VERSION_3)
2257 		return -EOPNOTSUPP;
2258 
2259 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2260 		return -EFAULT;
2261 
2262 	/* reserved for future extensions */
2263 	if (cfg.flags)
2264 		return -EINVAL;
2265 
2266 	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
2267 		return -ERANGE;
2268 
2269 	switch (cfg.rx_filter) {
2270 	case HWTSTAMP_FILTER_NONE:
2271 		priv->rx_ts_enabled = 0;
2272 		break;
2273 	case HWTSTAMP_FILTER_ALL:
2274 	case HWTSTAMP_FILTER_NTP_ALL:
2275 		return -ERANGE;
2276 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2277 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2278 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2279 		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2280 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2281 		break;
2282 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2283 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2284 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2285 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2286 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2287 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2288 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2289 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2290 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2291 		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2292 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2293 		break;
2294 	default:
2295 		return -ERANGE;
2296 	}
2297 
2298 	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
2299 
2300 	switch (cpsw->version) {
2301 	case CPSW_VERSION_1:
2302 		cpsw_hwtstamp_v1(priv);
2303 		break;
2304 	case CPSW_VERSION_2:
2305 	case CPSW_VERSION_3:
2306 		cpsw_hwtstamp_v2(priv);
2307 		break;
2308 	default:
2309 		WARN_ON(1);
2310 	}
2311 
2312 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2313 }
2314 
2315 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2316 {
2317 	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
2318 	struct cpsw_priv *priv = netdev_priv(dev);
2319 	struct hwtstamp_config cfg;
2320 
2321 	if (cpsw->version != CPSW_VERSION_1 &&
2322 	    cpsw->version != CPSW_VERSION_2 &&
2323 	    cpsw->version != CPSW_VERSION_3)
2324 		return -EOPNOTSUPP;
2325 
2326 	cfg.flags = 0;
2327 	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2328 	cfg.rx_filter = priv->rx_ts_enabled;
2329 
2330 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2331 }
2332 #else
2333 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2334 {
2335 	return -EOPNOTSUPP;
2336 }
2337 
2338 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2339 {
2340 	return -EOPNOTSUPP;
2341 }
2342 #endif /* CONFIG_TI_CPTS */
2343 
2344 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2345 {
2346 	struct cpsw_priv *priv = netdev_priv(dev);
2347 	struct cpsw_common *cpsw = priv->cpsw;
2348 	int slave_no = cpsw_slave_index(cpsw, priv);
2349 
2350 	if (!netif_running(dev))
2351 		return -EINVAL;
2352 
2353 	switch (cmd) {
2354 	case SIOCSHWTSTAMP:
2355 		return cpsw_hwtstamp_set(dev, req);
2356 	case SIOCGHWTSTAMP:
2357 		return cpsw_hwtstamp_get(dev, req);
2358 	}
2359 
2360 	if (!cpsw->slaves[slave_no].phy)
2361 		return -EOPNOTSUPP;
2362 	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
2363 }
2364 
2365 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
2366 {
2367 	struct cpsw_priv *priv = netdev_priv(ndev);
2368 	struct cpsw_common *cpsw = priv->cpsw;
2369 	int ch;
2370 
2371 	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
2372 	ndev->stats.tx_errors++;
2373 	cpsw_intr_disable(cpsw);
2374 	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
2375 		cpdma_chan_stop(cpsw->txv[ch].ch);
2376 		cpdma_chan_start(cpsw->txv[ch].ch);
2377 	}
2378 
2379 	cpsw_intr_enable(cpsw);
2380 	netif_trans_update(ndev);
2381 	netif_tx_wake_all_queues(ndev);
2382 }
2383 
2384 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
2385 {
2386 	struct cpsw_priv *priv = netdev_priv(ndev);
2387 	struct sockaddr *addr = (struct sockaddr *)p;
2388 	struct cpsw_common *cpsw = priv->cpsw;
2389 	int flags = 0;
2390 	u16 vid = 0;
2391 	int ret;
2392 
2393 	if (!is_valid_ether_addr(addr->sa_data))
2394 		return -EADDRNOTAVAIL;
2395 
2396 	ret = pm_runtime_get_sync(cpsw->dev);
2397 	if (ret < 0) {
2398 		pm_runtime_put_noidle(cpsw->dev);
2399 		return ret;
2400 	}
2401 
2402 	if (cpsw->data.dual_emac) {
2403 		vid = cpsw->slaves[priv->emac_port].port_vlan;
2404 		flags = ALE_VLAN;
2405 	}
2406 
2407 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
2408 			   flags, vid);
2409 	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
2410 			   flags, vid);
2411 
2412 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
2413 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2414 	for_each_slave(priv, cpsw_set_slave_mac, priv);
2415 
2416 	pm_runtime_put(cpsw->dev);
2417 
2418 	return 0;
2419 }
2420 
2421 #ifdef CONFIG_NET_POLL_CONTROLLER
2422 static void cpsw_ndo_poll_controller(struct net_device *ndev)
2423 {
2424 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2425 
2426 	cpsw_intr_disable(cpsw);
2427 	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2428 	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
2429 	cpsw_intr_enable(cpsw);
2430 }
2431 #endif
2432 
2433 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
2434 				unsigned short vid)
2435 {
2436 	int ret;
2437 	int unreg_mcast_mask = 0;
2438 	int mcast_mask;
2439 	u32 port_mask;
2440 	struct cpsw_common *cpsw = priv->cpsw;
2441 
2442 	if (cpsw->data.dual_emac) {
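		/* ALE port for this slave is emac_port + 1, since port 0
		 * is the host (CPU) port.
		 */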
2443 		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
2444 
2445 		mcast_mask = ALE_PORT_HOST;
2446 		if (priv->ndev->flags & IFF_ALLMULTI)
2447 			unreg_mcast_mask = mcast_mask;
2448 	} else {
2449 		port_mask = ALE_ALL_PORTS;
2450 		mcast_mask = port_mask;
2451 
2452 		if (priv->ndev->flags & IFF_ALLMULTI)
2453 			unreg_mcast_mask = ALE_ALL_PORTS;
2454 		else
2455 			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
2456 	}
2457 
2458 	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
2459 				unreg_mcast_mask);
2460 	if (ret != 0)
2461 		return ret;
2462 
2463 	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
2464 				 HOST_PORT_NUM, ALE_VLAN, vid);
2465 	if (ret != 0)
2466 		goto clean_vid;
2467 
2468 	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
2469 				 mcast_mask, ALE_VLAN, vid, 0);
2470 	if (ret != 0)
2471 		goto clean_vlan_ucast;
2472 	return 0;
2473 
2474 clean_vlan_ucast:
2475 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2476 			   HOST_PORT_NUM, ALE_VLAN, vid);
2477 clean_vid:
2478 	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2479 	return ret;
2480 }
2481 
2482 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
2483 				    __be16 proto, u16 vid)
2484 {
2485 	struct cpsw_priv *priv = netdev_priv(ndev);
2486 	struct cpsw_common *cpsw = priv->cpsw;
2487 	int ret;
2488 
2489 	if (vid == cpsw->data.default_vlan)
2490 		return 0;
2491 
2492 	ret = pm_runtime_get_sync(cpsw->dev);
2493 	if (ret < 0) {
2494 		pm_runtime_put_noidle(cpsw->dev);
2495 		return ret;
2496 	}
2497 
2498 	if (cpsw->data.dual_emac) {
2499 		/* In dual EMAC, reserved VLAN id should not be used for
2500 		 * creating VLAN interfaces as this can break the dual
2501 		 * EMAC port separation
2502 		 */
2503 		int i;
2504 
2505 		for (i = 0; i < cpsw->data.slaves; i++) {
2506 			if (vid == cpsw->slaves[i].port_vlan) {
2507 				ret = -EINVAL;
2508 				goto err;
2509 			}
2510 		}
2511 	}
2512 
2513 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
2514 	ret = cpsw_add_vlan_ale_entry(priv, vid);
2515 err:
2516 	pm_runtime_put(cpsw->dev);
2517 	return ret;
2518 }
2519 
2520 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
2521 				     __be16 proto, u16 vid)
2522 {
2523 	struct cpsw_priv *priv = netdev_priv(ndev);
2524 	struct cpsw_common *cpsw = priv->cpsw;
2525 	int ret;
2526 
2527 	if (vid == cpsw->data.default_vlan)
2528 		return 0;
2529 
2530 	ret = pm_runtime_get_sync(cpsw->dev);
2531 	if (ret < 0) {
2532 		pm_runtime_put_noidle(cpsw->dev);
2533 		return ret;
2534 	}
2535 
2536 	if (cpsw->data.dual_emac) {
2537 		int i;
2538 
2539 		for (i = 0; i < cpsw->data.slaves; i++) {
2540 			if (vid == cpsw->slaves[i].port_vlan)
2541 				goto err;
2542 		}
2543 	}
2544 
2545 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
2546 	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2547 	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2548 				  HOST_PORT_NUM, ALE_VLAN, vid);
2549 	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2550 				  0, ALE_VLAN, vid);
2551 	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
2552 err:
2553 	pm_runtime_put(cpsw->dev);
2554 	return ret;
2555 }
2556 
2557 static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
2558 {
2559 	struct cpsw_priv *priv = netdev_priv(ndev);
2560 	struct cpsw_common *cpsw = priv->cpsw;
2561 	struct cpsw_slave *slave;
2562 	u32 min_rate;
2563 	u32 ch_rate;
2564 	int i, ret;
2565 
2566 	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
2567 	if (ch_rate == rate)
2568 		return 0;
2569 
2570 	ch_rate = rate * 1000;
2571 	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
2572 	if ((ch_rate < min_rate && ch_rate)) {
2573 		dev_err(priv->dev, "The channel rate cannot be less than %dMbps\n",
2574 			min_rate);
2575 		return -EINVAL;
2576 	}
2577 
2578 	if (rate > cpsw->speed) {
2579 		dev_err(priv->dev, "The channel rate cannot be more than %dMbps\n", cpsw->speed);
2580 		return -EINVAL;
2581 	}
2582 
2583 	ret = pm_runtime_get_sync(cpsw->dev);
2584 	if (ret < 0) {
2585 		pm_runtime_put_noidle(cpsw->dev);
2586 		return ret;
2587 	}
2588 
2589 	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
2590 	pm_runtime_put(cpsw->dev);
2591 
2592 	if (ret)
2593 		return ret;
2594 
2595 	/* update rates for slaves tx queues */
2596 	for (i = 0; i < cpsw->data.slaves; i++) {
2597 		slave = &cpsw->slaves[i];
2598 		if (!slave->ndev)
2599 			continue;
2600 
2601 		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
2602 	}
2603 
2604 	cpsw_split_res(ndev);
2605 	return ret;
2606 }
2607 
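/* A hypothetical tc(8) invocation that would reach this handler (values
 * are illustrative only): map the 8 priorities onto 3 traffic classes
 * and offload the mapping to the switch:
 *   tc qdisc add dev eth0 root handle 100: mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 queues 1@0 1@1 2@2 hw 1 mode dcb
 */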
2608 static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
2609 {
2610 	struct tc_mqprio_qopt_offload *mqprio = type_data;
2611 	struct cpsw_priv *priv = netdev_priv(ndev);
2612 	struct cpsw_common *cpsw = priv->cpsw;
2613 	int fifo, num_tc, count, offset;
2614 	struct cpsw_slave *slave;
2615 	u32 tx_prio_map = 0;
2616 	int i, tc, ret;
2617 
2618 	num_tc = mqprio->qopt.num_tc;
2619 	if (num_tc > CPSW_TC_NUM)
2620 		return -EINVAL;
2621 
2622 	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
2623 		return -EINVAL;
2624 
2625 	ret = pm_runtime_get_sync(cpsw->dev);
2626 	if (ret < 0) {
2627 		pm_runtime_put_noidle(cpsw->dev);
2628 		return ret;
2629 	}
2630 
2631 	if (num_tc) {
2632 		for (i = 0; i < 8; i++) {
2633 			tc = mqprio->qopt.prio_tc_map[i];
2634 			fifo = cpsw_tc_to_fifo(tc, num_tc);
2635 			tx_prio_map |= fifo << (4 * i);
2636 		}
2637 
2638 		netdev_set_num_tc(ndev, num_tc);
2639 		for (i = 0; i < num_tc; i++) {
2640 			count = mqprio->qopt.count[i];
2641 			offset = mqprio->qopt.offset[i];
2642 			netdev_set_tc_queue(ndev, i, count, offset);
2643 		}
2644 	}
2645 
2646 	if (!mqprio->qopt.hw) {
2647 		/* restore default configuration */
2648 		netdev_reset_tc(ndev);
2649 		tx_prio_map = TX_PRIORITY_MAPPING;
2650 	}
2651 
2652 	priv->mqprio_hw = mqprio->qopt.hw;
2653 
2654 	offset = cpsw->version == CPSW_VERSION_1 ?
2655 		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
2656 
2657 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2658 	slave_write(slave, tx_prio_map, offset);
2659 
2660 	pm_runtime_put_sync(cpsw->dev);
2661 
2662 	return 0;
2663 }
2664 
2665 static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2666 			     void *type_data)
2667 {
2668 	switch (type) {
2669 	case TC_SETUP_QDISC_CBS:
2670 		return cpsw_set_cbs(ndev, type_data);
2671 
2672 	case TC_SETUP_QDISC_MQPRIO:
2673 		return cpsw_set_mqprio(ndev, type_data);
2674 
2675 	default:
2676 		return -EOPNOTSUPP;
2677 	}
2678 }
2679 
2680 static const struct net_device_ops cpsw_netdev_ops = {
2681 	.ndo_open		= cpsw_ndo_open,
2682 	.ndo_stop		= cpsw_ndo_stop,
2683 	.ndo_start_xmit		= cpsw_ndo_start_xmit,
2684 	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
2685 	.ndo_do_ioctl		= cpsw_ndo_ioctl,
2686 	.ndo_validate_addr	= eth_validate_addr,
2687 	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
2688 	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
2689 	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
2690 #ifdef CONFIG_NET_POLL_CONTROLLER
2691 	.ndo_poll_controller	= cpsw_ndo_poll_controller,
2692 #endif
2693 	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
2694 	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
2695 	.ndo_setup_tc           = cpsw_ndo_setup_tc,
2696 };
2697 
2698 static int cpsw_get_regs_len(struct net_device *ndev)
2699 {
2700 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2701 
2702 	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
2703 }
2704 
2705 static void cpsw_get_regs(struct net_device *ndev,
2706 			  struct ethtool_regs *regs, void *p)
2707 {
2708 	u32 *reg = p;
2709 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2710 
2711 	/* update CPSW IP version */
2712 	regs->version = cpsw->version;
2713 
2714 	cpsw_ale_dump(cpsw->ale, reg);
2715 }
2716 
2717 static void cpsw_get_drvinfo(struct net_device *ndev,
2718 			     struct ethtool_drvinfo *info)
2719 {
2720 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2721 	struct platform_device	*pdev = to_platform_device(cpsw->dev);
2722 
2723 	strlcpy(info->driver, "cpsw", sizeof(info->driver));
2724 	strlcpy(info->version, "1.0", sizeof(info->version));
2725 	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
2726 }
2727 
2728 static u32 cpsw_get_msglevel(struct net_device *ndev)
2729 {
2730 	struct cpsw_priv *priv = netdev_priv(ndev);
2731 	return priv->msg_enable;
2732 }
2733 
2734 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
2735 {
2736 	struct cpsw_priv *priv = netdev_priv(ndev);
2737 	priv->msg_enable = value;
2738 }
2739 
2740 #if IS_ENABLED(CONFIG_TI_CPTS)
2741 static int cpsw_get_ts_info(struct net_device *ndev,
2742 			    struct ethtool_ts_info *info)
2743 {
2744 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2745 
2746 	info->so_timestamping =
2747 		SOF_TIMESTAMPING_TX_HARDWARE |
2748 		SOF_TIMESTAMPING_TX_SOFTWARE |
2749 		SOF_TIMESTAMPING_RX_HARDWARE |
2750 		SOF_TIMESTAMPING_RX_SOFTWARE |
2751 		SOF_TIMESTAMPING_SOFTWARE |
2752 		SOF_TIMESTAMPING_RAW_HARDWARE;
2753 	info->phc_index = cpsw->cpts->phc_index;
2754 	info->tx_types =
2755 		(1 << HWTSTAMP_TX_OFF) |
2756 		(1 << HWTSTAMP_TX_ON);
2757 	info->rx_filters =
2758 		(1 << HWTSTAMP_FILTER_NONE) |
2759 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2760 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2761 	return 0;
2762 }
2763 #else
2764 static int cpsw_get_ts_info(struct net_device *ndev,
2765 			    struct ethtool_ts_info *info)
2766 {
2767 	info->so_timestamping =
2768 		SOF_TIMESTAMPING_TX_SOFTWARE |
2769 		SOF_TIMESTAMPING_RX_SOFTWARE |
2770 		SOF_TIMESTAMPING_SOFTWARE;
2771 	info->phc_index = -1;
2772 	info->tx_types = 0;
2773 	info->rx_filters = 0;
2774 	return 0;
2775 }
2776 #endif
2777 
2778 static int cpsw_get_link_ksettings(struct net_device *ndev,
2779 				   struct ethtool_link_ksettings *ecmd)
2780 {
2781 	struct cpsw_priv *priv = netdev_priv(ndev);
2782 	struct cpsw_common *cpsw = priv->cpsw;
2783 	int slave_no = cpsw_slave_index(cpsw, priv);
2784 
2785 	if (!cpsw->slaves[slave_no].phy)
2786 		return -EOPNOTSUPP;
2787 
2788 	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
2789 	return 0;
2790 }
2791 
2792 static int cpsw_set_link_ksettings(struct net_device *ndev,
2793 				   const struct ethtool_link_ksettings *ecmd)
2794 {
2795 	struct cpsw_priv *priv = netdev_priv(ndev);
2796 	struct cpsw_common *cpsw = priv->cpsw;
2797 	int slave_no = cpsw_slave_index(cpsw, priv);
2798 
2799 	if (cpsw->slaves[slave_no].phy)
2800 		return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
2801 						 ecmd);
2802 	else
2803 		return -EOPNOTSUPP;
2804 }
2805 
2806 static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2807 {
2808 	struct cpsw_priv *priv = netdev_priv(ndev);
2809 	struct cpsw_common *cpsw = priv->cpsw;
2810 	int slave_no = cpsw_slave_index(cpsw, priv);
2811 
2812 	wol->supported = 0;
2813 	wol->wolopts = 0;
2814 
2815 	if (cpsw->slaves[slave_no].phy)
2816 		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
2817 }
2818 
2819 static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2820 {
2821 	struct cpsw_priv *priv = netdev_priv(ndev);
2822 	struct cpsw_common *cpsw = priv->cpsw;
2823 	int slave_no = cpsw_slave_index(cpsw, priv);
2824 
2825 	if (cpsw->slaves[slave_no].phy)
2826 		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
2827 	else
2828 		return -EOPNOTSUPP;
2829 }
2830 
2831 static void cpsw_get_pauseparam(struct net_device *ndev,
2832 				struct ethtool_pauseparam *pause)
2833 {
2834 	struct cpsw_priv *priv = netdev_priv(ndev);
2835 
2836 	pause->autoneg = AUTONEG_DISABLE;
2837 	pause->rx_pause = priv->rx_pause ? true : false;
2838 	pause->tx_pause = priv->tx_pause ? true : false;
2839 }
2840 
2841 static int cpsw_set_pauseparam(struct net_device *ndev,
2842 			       struct ethtool_pauseparam *pause)
2843 {
2844 	struct cpsw_priv *priv = netdev_priv(ndev);
2845 	bool link;
2846 
2847 	priv->rx_pause = pause->rx_pause ? true : false;
2848 	priv->tx_pause = pause->tx_pause ? true : false;
2849 
2850 	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
2851 	return 0;
2852 }
2853 
2854 static int cpsw_ethtool_op_begin(struct net_device *ndev)
2855 {
2856 	struct cpsw_priv *priv = netdev_priv(ndev);
2857 	struct cpsw_common *cpsw = priv->cpsw;
2858 	int ret;
2859 
2860 	ret = pm_runtime_get_sync(cpsw->dev);
2861 	if (ret < 0) {
2862 		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
2863 		pm_runtime_put_noidle(cpsw->dev);
2864 	}
2865 
2866 	return ret;
2867 }
2868 
2869 static void cpsw_ethtool_op_complete(struct net_device *ndev)
2870 {
2871 	struct cpsw_priv *priv = netdev_priv(ndev);
2872 	int ret;
2873 
2874 	ret = pm_runtime_put(priv->cpsw->dev);
2875 	if (ret < 0)
2876 		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
2877 }
2878 
2879 static void cpsw_get_channels(struct net_device *ndev,
2880 			      struct ethtool_channels *ch)
2881 {
2882 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2883 
2884 	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
2885 	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
2886 	ch->max_combined = 0;
2887 	ch->max_other = 0;
2888 	ch->other_count = 0;
2889 	ch->rx_count = cpsw->rx_ch_num;
2890 	ch->tx_count = cpsw->tx_ch_num;
2891 	ch->combined_count = 0;
2892 }
2893 
2894 static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
2895 				  struct ethtool_channels *ch)
2896 {
2897 	if (cpsw->quirk_irq) {
2898 		dev_err(cpsw->dev, "At most one tx/rx queue is allowed\n");
2899 		return -EOPNOTSUPP;
2900 	}
2901 
2902 	if (ch->combined_count)
2903 		return -EINVAL;
2904 
2905 	/* verify we have at least one channel in each direction */
2906 	if (!ch->rx_count || !ch->tx_count)
2907 		return -EINVAL;
2908 
2909 	if (ch->rx_count > cpsw->data.channels ||
2910 	    ch->tx_count > cpsw->data.channels)
2911 		return -EINVAL;
2912 
2913 	return 0;
2914 }
2915 
2916 static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
2917 {
2918 	struct cpsw_common *cpsw = priv->cpsw;
2919 	void (*handler)(void *, int, int);
2920 	struct netdev_queue *queue;
2921 	struct cpsw_vector *vec;
2922 	int ret, *ch, vch;
2923 
2924 	if (rx) {
2925 		ch = &cpsw->rx_ch_num;
2926 		vec = cpsw->rxv;
2927 		handler = cpsw_rx_handler;
2928 	} else {
2929 		ch = &cpsw->tx_ch_num;
2930 		vec = cpsw->txv;
2931 		handler = cpsw_tx_handler;
2932 	}
2933 
2934 	while (*ch < ch_num) {
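		/* TX channels are created in reverse CPDMA order (7, 6, ...)
		 * so the highest-priority HW channels are used first; RX
		 * channels map directly.
		 */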
2935 		vch = rx ? *ch : 7 - *ch;
2936 		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
2937 		queue = netdev_get_tx_queue(priv->ndev, *ch);
2938 		queue->tx_maxrate = 0;
2939 
2940 		if (IS_ERR(vec[*ch].ch))
2941 			return PTR_ERR(vec[*ch].ch);
2942 
2943 		if (!vec[*ch].ch)
2944 			return -EINVAL;
2945 
2946 		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
2947 			  (rx ? "rx" : "tx"));
2948 		(*ch)++;
2949 	}
2950 
2951 	while (*ch > ch_num) {
2952 		(*ch)--;
2953 
2954 		ret = cpdma_chan_destroy(vec[*ch].ch);
2955 		if (ret)
2956 			return ret;
2957 
2958 		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
2959 			  (rx ? "rx" : "tx"));
2960 	}
2961 
2962 	return 0;
2963 }
2964 
2965 static int cpsw_update_channels(struct cpsw_priv *priv,
2966 				struct ethtool_channels *ch)
2967 {
2968 	int ret;
2969 
2970 	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
2971 	if (ret)
2972 		return ret;
2973 
2974 	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
2975 	if (ret)
2976 		return ret;
2977 
2978 	return 0;
2979 }
2980 
2981 static void cpsw_suspend_data_pass(struct net_device *ndev)
2982 {
2983 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2984 	struct cpsw_slave *slave;
2985 	int i;
2986 
2987 	/* Disable NAPI scheduling */
2988 	cpsw_intr_disable(cpsw);
2989 
2990 	/* Stop all transmit queues for every network device.
2991 	 * Disable re-use of rx descriptors via netif_dormant_on().
2992 	 */
2993 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
2994 		if (!(slave->ndev && netif_running(slave->ndev)))
2995 			continue;
2996 
2997 		netif_tx_stop_all_queues(slave->ndev);
2998 		netif_dormant_on(slave->ndev);
2999 	}
3000 
3001 	/* Handle rest of tx packets and stop cpdma channels */
3002 	cpdma_ctlr_stop(cpsw->dma);
3003 }
3004 
3005 static int cpsw_resume_data_pass(struct net_device *ndev)
3006 {
3007 	struct cpsw_priv *priv = netdev_priv(ndev);
3008 	struct cpsw_common *cpsw = priv->cpsw;
3009 	struct cpsw_slave *slave;
3010 	int i, ret;
3011 
3012 	/* Allow rx packets handling */
3013 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
3014 		if (slave->ndev && netif_running(slave->ndev))
3015 			netif_dormant_off(slave->ndev);
3016 
3017 	/* Receive is started after this point */
3018 	if (cpsw->usage_count) {
3019 		ret = cpsw_fill_rx_channels(priv);
3020 		if (ret)
3021 			return ret;
3022 
3023 		cpdma_ctlr_start(cpsw->dma);
3024 		cpsw_intr_enable(cpsw);
3025 	}
3026 
3027 	/* Resume transmit for every affected interface */
3028 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
3029 		if (slave->ndev && netif_running(slave->ndev))
3030 			netif_tx_start_all_queues(slave->ndev);
3031 
3032 	return 0;
3033 }
3034 
3035 static int cpsw_set_channels(struct net_device *ndev,
3036 			     struct ethtool_channels *chs)
3037 {
3038 	struct cpsw_priv *priv = netdev_priv(ndev);
3039 	struct cpsw_common *cpsw = priv->cpsw;
3040 	struct cpsw_slave *slave;
3041 	int i, ret;
3042 
3043 	ret = cpsw_check_ch_settings(cpsw, chs);
3044 	if (ret < 0)
3045 		return ret;
3046 
3047 	cpsw_suspend_data_pass(ndev);
3048 	ret = cpsw_update_channels(priv, chs);
3049 	if (ret)
3050 		goto err;
3051 
3052 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
3053 		if (!(slave->ndev && netif_running(slave->ndev)))
3054 			continue;
3055 
3056 		/* Inform stack about new count of queues */
3057 		ret = netif_set_real_num_tx_queues(slave->ndev,
3058 						   cpsw->tx_ch_num);
3059 		if (ret) {
3060 			dev_err(priv->dev, "cannot set real number of tx queues\n");
3061 			goto err;
3062 		}
3063 
3064 		ret = netif_set_real_num_rx_queues(slave->ndev,
3065 						   cpsw->rx_ch_num);
3066 		if (ret) {
3067 			dev_err(priv->dev, "cannot set real number of rx queues\n");
3068 			goto err;
3069 		}
3070 	}
3071 
3072 	if (cpsw->usage_count)
3073 		cpsw_split_res(ndev);
3074 
3075 	ret = cpsw_resume_data_pass(ndev);
3076 	if (!ret)
3077 		return 0;
3078 err:
3079 	dev_err(priv->dev, "cannot update channels number, closing device\n");
3080 	dev_close(ndev);
3081 	return ret;
3082 }
3083 
3084 static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
3085 {
3086 	struct cpsw_priv *priv = netdev_priv(ndev);
3087 	struct cpsw_common *cpsw = priv->cpsw;
3088 	int slave_no = cpsw_slave_index(cpsw, priv);
3089 
3090 	if (cpsw->slaves[slave_no].phy)
3091 		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
3092 	else
3093 		return -EOPNOTSUPP;
3094 }
3095 
3096 static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
3097 {
3098 	struct cpsw_priv *priv = netdev_priv(ndev);
3099 	struct cpsw_common *cpsw = priv->cpsw;
3100 	int slave_no = cpsw_slave_index(cpsw, priv);
3101 
3102 	if (cpsw->slaves[slave_no].phy)
3103 		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
3104 	else
3105 		return -EOPNOTSUPP;
3106 }
3107 
3108 static int cpsw_nway_reset(struct net_device *ndev)
3109 {
3110 	struct cpsw_priv *priv = netdev_priv(ndev);
3111 	struct cpsw_common *cpsw = priv->cpsw;
3112 	int slave_no = cpsw_slave_index(cpsw, priv);
3113 
3114 	if (cpsw->slaves[slave_no].phy)
3115 		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
3116 	else
3117 		return -EOPNOTSUPP;
3118 }
3119 
3120 static void cpsw_get_ringparam(struct net_device *ndev,
3121 			       struct ethtool_ringparam *ering)
3122 {
3123 	struct cpsw_priv *priv = netdev_priv(ndev);
3124 	struct cpsw_common *cpsw = priv->cpsw;
3125 
3126 	/* tx ring resize is not supported */
3127 	ering->tx_max_pending = 0;
3128 	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
3129 	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
3130 	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
3131 }
3132 
3133 static int cpsw_set_ringparam(struct net_device *ndev,
3134 			      struct ethtool_ringparam *ering)
3135 {
3136 	struct cpsw_priv *priv = netdev_priv(ndev);
3137 	struct cpsw_common *cpsw = priv->cpsw;
3138 	int ret;
3139 
3140 	/* ignore ering->tx_pending - only rx_pending adjustment is supported */
3141 
3142 	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
3143 	    ering->rx_pending < CPSW_MAX_QUEUES ||
3144 	    ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
3145 		return -EINVAL;
3146 
3147 	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
3148 		return 0;
3149 
3150 	cpsw_suspend_data_pass(ndev);
3151 
3152 	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
3153 
3154 	if (cpsw->usage_count)
3155 		cpdma_chan_split_pool(cpsw->dma);
3156 
3157 	ret = cpsw_resume_data_pass(ndev);
3158 	if (!ret)
3159 		return 0;
3160 
3161 	dev_err(&ndev->dev, "cannot set ring params, closing device\n");
3162 	dev_close(ndev);
3163 	return ret;
3164 }
3165 
3166 static const struct ethtool_ops cpsw_ethtool_ops = {
3167 	.get_drvinfo	= cpsw_get_drvinfo,
3168 	.get_msglevel	= cpsw_get_msglevel,
3169 	.set_msglevel	= cpsw_set_msglevel,
3170 	.get_link	= ethtool_op_get_link,
3171 	.get_ts_info	= cpsw_get_ts_info,
3172 	.get_coalesce	= cpsw_get_coalesce,
3173 	.set_coalesce	= cpsw_set_coalesce,
3174 	.get_sset_count		= cpsw_get_sset_count,
3175 	.get_strings		= cpsw_get_strings,
3176 	.get_ethtool_stats	= cpsw_get_ethtool_stats,
3177 	.get_pauseparam		= cpsw_get_pauseparam,
3178 	.set_pauseparam		= cpsw_set_pauseparam,
3179 	.get_wol	= cpsw_get_wol,
3180 	.set_wol	= cpsw_set_wol,
3181 	.get_regs_len	= cpsw_get_regs_len,
3182 	.get_regs	= cpsw_get_regs,
3183 	.begin		= cpsw_ethtool_op_begin,
3184 	.complete	= cpsw_ethtool_op_complete,
3185 	.get_channels	= cpsw_get_channels,
3186 	.set_channels	= cpsw_set_channels,
3187 	.get_link_ksettings	= cpsw_get_link_ksettings,
3188 	.set_link_ksettings	= cpsw_set_link_ksettings,
3189 	.get_eee	= cpsw_get_eee,
3190 	.set_eee	= cpsw_set_eee,
3191 	.nway_reset	= cpsw_nway_reset,
3192 	.get_ringparam = cpsw_get_ringparam,
3193 	.set_ringparam = cpsw_set_ringparam,
3194 };
3195 
3196 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
3197 			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
3198 {
3199 	void __iomem		*regs = cpsw->regs;
3200 	int			slave_num = slave->slave_num;
3201 	struct cpsw_slave_data	*data = cpsw->data.slave_data + slave_num;
3202 
3203 	slave->data	= data;
3204 	slave->regs	= regs + slave_reg_ofs;
3205 	slave->sliver	= regs + sliver_reg_ofs;
3206 	slave->port_vlan = data->dual_emac_res_vlan;
3207 }
3208 
3209 static int cpsw_probe_dt(struct cpsw_platform_data *data,
3210 			 struct platform_device *pdev)
3211 {
3212 	struct device_node *node = pdev->dev.of_node;
3213 	struct device_node *slave_node;
3214 	int i = 0, ret;
3215 	u32 prop;
3216 
3217 	if (!node)
3218 		return -EINVAL;
3219 
3220 	if (of_property_read_u32(node, "slaves", &prop)) {
3221 		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
3222 		return -EINVAL;
3223 	}
3224 	data->slaves = prop;
3225 
3226 	if (of_property_read_u32(node, "active_slave", &prop)) {
3227 		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
3228 		return -EINVAL;
3229 	}
3230 	data->active_slave = prop;
3231 
3232 	data->slave_data = devm_kcalloc(&pdev->dev,
3233 					data->slaves,
3234 					sizeof(struct cpsw_slave_data),
3235 					GFP_KERNEL);
3236 	if (!data->slave_data)
3237 		return -ENOMEM;
3238 
3239 	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
3240 		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
3241 		return -EINVAL;
3242 	}
3243 	data->channels = prop;
3244 
3245 	if (of_property_read_u32(node, "ale_entries", &prop)) {
3246 		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
3247 		return -EINVAL;
3248 	}
3249 	data->ale_entries = prop;
3250 
3251 	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
3252 		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
3253 		return -EINVAL;
3254 	}
3255 	data->bd_ram_size = prop;
3256 
3257 	if (of_property_read_u32(node, "mac_control", &prop)) {
3258 		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
3259 		return -EINVAL;
3260 	}
3261 	data->mac_control = prop;
3262 
3263 	if (of_property_read_bool(node, "dual_emac"))
3264 		data->dual_emac = 1;
3265 
3266 	/*
3267 	 * Populate all the child nodes here...
3268 	 */
3269 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3270 	/* We do not want to force this, as some platforms may have no child nodes */
3271 	if (ret)
3272 		dev_warn(&pdev->dev, "Doesn't have any child node\n");
3273 
3274 	for_each_available_child_of_node(node, slave_node) {
3275 		struct cpsw_slave_data *slave_data = data->slave_data + i;
3276 		const void *mac_addr = NULL;
3277 		int lenp;
3278 		const __be32 *parp;
3279 
3280 		/* Not a slave child node, continue */
3281 		if (!of_node_name_eq(slave_node, "slave"))
3282 			continue;
3283 
3284 		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
3285 						    NULL);
3286 		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
3287 		    IS_ERR(slave_data->ifphy)) {
3288 			ret = PTR_ERR(slave_data->ifphy);
3289 			dev_err(&pdev->dev,
3290 				"%d: Error retrieving port phy: %d\n", i, ret);
3291 			return ret;
3292 		}
3293 
3294 		slave_data->phy_node = of_parse_phandle(slave_node,
3295 							"phy-handle", 0);
3296 		parp = of_get_property(slave_node, "phy_id", &lenp);
3297 		if (slave_data->phy_node) {
3298 			dev_dbg(&pdev->dev,
3299 				"slave[%d] using phy-handle=\"%pOF\"\n",
3300 				i, slave_data->phy_node);
3301 		} else if (of_phy_is_fixed_link(slave_node)) {
3302 			/* In the case of a fixed PHY, the DT node associated
3303 			 * with the PHY is the Ethernet MAC DT node.
3304 			 */
3305 			ret = of_phy_register_fixed_link(slave_node);
3306 			if (ret) {
3307 				if (ret != -EPROBE_DEFER)
3308 					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
3309 				return ret;
3310 			}
3311 			slave_data->phy_node = of_node_get(slave_node);
3312 		} else if (parp) {
3313 			u32 phyid;
3314 			struct device_node *mdio_node;
3315 			struct platform_device *mdio;
3316 
3317 			if (lenp != (sizeof(__be32) * 2)) {
3318 				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
3319 				goto no_phy_slave;
3320 			}
3321 			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
3322 			phyid = be32_to_cpup(parp+1);
3323 			mdio = of_find_device_by_node(mdio_node);
3324 			of_node_put(mdio_node);
3325 			if (!mdio) {
3326 				dev_err(&pdev->dev, "Missing mdio platform device\n");
3327 				return -EINVAL;
3328 			}
3329 			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
3330 				 PHY_ID_FMT, mdio->name, phyid);
3331 			put_device(&mdio->dev);
3332 		} else {
3333 			dev_err(&pdev->dev,
3334 				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
3335 				i);
3336 			goto no_phy_slave;
3337 		}
3338 		slave_data->phy_if = of_get_phy_mode(slave_node);
3339 		if (slave_data->phy_if < 0) {
3340 			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
3341 				i);
3342 			return slave_data->phy_if;
3343 		}
3344 
3345 no_phy_slave:
3346 		mac_addr = of_get_mac_address(slave_node);
3347 		if (mac_addr) {
3348 			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
3349 		} else {
3350 			ret = ti_cm_get_macid(&pdev->dev, i,
3351 					      slave_data->mac_addr);
3352 			if (ret)
3353 				return ret;
3354 		}
3355 		if (data->dual_emac) {
3356 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
3357 						 &prop)) {
3358 				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
3359 				slave_data->dual_emac_res_vlan = i+1;
3360 				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
3361 					slave_data->dual_emac_res_vlan, i);
3362 			} else {
3363 				slave_data->dual_emac_res_vlan = prop;
3364 			}
3365 		}
3366 
3367 		i++;
3368 		if (i == data->slaves)
3369 			break;
3370 	}
3371 
3372 	return 0;
3373 }
3374 
3375 static void cpsw_remove_dt(struct platform_device *pdev)
3376 {
3377 	struct net_device *ndev = platform_get_drvdata(pdev);
3378 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3379 	struct cpsw_platform_data *data = &cpsw->data;
3380 	struct device_node *node = pdev->dev.of_node;
3381 	struct device_node *slave_node;
3382 	int i = 0;
3383 
3384 	for_each_available_child_of_node(node, slave_node) {
3385 		struct cpsw_slave_data *slave_data = &data->slave_data[i];
3386 
3387 		if (!of_node_name_eq(slave_node, "slave"))
3388 			continue;
3389 
3390 		if (of_phy_is_fixed_link(slave_node))
3391 			of_phy_deregister_fixed_link(slave_node);
3392 
3393 		of_node_put(slave_data->phy_node);
3394 
3395 		i++;
3396 		if (i == data->slaves)
3397 			break;
3398 	}
3399 
3400 	of_platform_depopulate(&pdev->dev);
3401 }
3402 
3403 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
3404 {
3405 	struct cpsw_common		*cpsw = priv->cpsw;
3406 	struct cpsw_platform_data	*data = &cpsw->data;
3407 	struct net_device		*ndev;
3408 	struct cpsw_priv		*priv_sl2;
3409 	int ret = 0;
3410 
3411 	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
3412 	if (!ndev) {
3413 		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
3414 		return -ENOMEM;
3415 	}
3416 
3417 	priv_sl2 = netdev_priv(ndev);
3418 	priv_sl2->cpsw = cpsw;
3419 	priv_sl2->ndev = ndev;
3420 	priv_sl2->dev  = &ndev->dev;
3421 	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
3422 
3423 	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
3424 		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
3425 			ETH_ALEN);
3426 		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
3427 			 priv_sl2->mac_addr);
3428 	} else {
3429 		eth_random_addr(priv_sl2->mac_addr);
3430 		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
3431 			 priv_sl2->mac_addr);
3432 	}
3433 	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
3434 
3435 	priv_sl2->emac_port = 1;
3436 	cpsw->slaves[1].ndev = ndev;
3437 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
3438 
3439 	ndev->netdev_ops = &cpsw_netdev_ops;
3440 	ndev->ethtool_ops = &cpsw_ethtool_ops;
3441 
3442 	/* register the network device */
3443 	SET_NETDEV_DEV(ndev, cpsw->dev);
3444 	ret = register_netdev(ndev);
3445 	if (ret) {
3446 		dev_err(cpsw->dev, "cpsw: error registering net device\n");
3447 		free_netdev(ndev);
3448 		ret = -ENODEV;
3449 	}
3450 
3451 	return ret;
3452 }
3453 
3454 static const struct of_device_id cpsw_of_mtable[] = {
3455 	{ .compatible = "ti,cpsw"},
3456 	{ .compatible = "ti,am335x-cpsw"},
3457 	{ .compatible = "ti,am4372-cpsw"},
3458 	{ .compatible = "ti,dra7-cpsw"},
3459 	{ /* sentinel */ },
3460 };
3461 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
3462 
3463 static const struct soc_device_attribute cpsw_soc_devices[] = {
3464 	{ .family = "AM33xx", .revision = "ES1.0"},
3465 	{ /* sentinel */ }
3466 };
3467 
3468 static int cpsw_probe(struct platform_device *pdev)
3469 {
3470 	struct clk			*clk;
3471 	struct cpsw_platform_data	*data;
3472 	struct net_device		*ndev;
3473 	struct cpsw_priv		*priv;
3474 	struct cpdma_params		dma_params;
3475 	struct cpsw_ale_params		ale_params;
3476 	void __iomem			*ss_regs;
3477 	void __iomem			*cpts_regs;
3478 	struct resource			*res, *ss_res;
3479 	struct gpio_descs		*mode;
3480 	u32 slave_offset, sliver_offset, slave_size;
3481 	const struct soc_device_attribute *soc;
3482 	struct cpsw_common		*cpsw;
3483 	int ret = 0, i, ch;
3484 	int irq;
3485 
3486 	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
3487 	if (!cpsw)
3488 		return -ENOMEM;
3489 
3490 	cpsw->dev = &pdev->dev;
3491 
3492 	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
3493 	if (!ndev) {
3494 		dev_err(&pdev->dev, "error allocating net_device\n");
3495 		return -ENOMEM;
3496 	}
3497 
3498 	platform_set_drvdata(pdev, ndev);
3499 	priv = netdev_priv(ndev);
3500 	priv->cpsw = cpsw;
3501 	priv->ndev = ndev;
3502 	priv->dev  = &ndev->dev;
3503 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
3504 	cpsw->rx_packet_max = max(rx_packet_max, 128);
3505 
3506 	mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
3507 	if (IS_ERR(mode)) {
3508 		ret = PTR_ERR(mode);
3509 		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
3510 		goto clean_ndev_ret;
3511 	}
3512 
3513 	/*
3514 	 * Runtime PM may be required here for child devices.
3515 	 */
3516 	pm_runtime_enable(&pdev->dev);
3517 
3518 	/* Select default pin state */
3519 	pinctrl_pm_select_default_state(&pdev->dev);
3520 
3521 	/* Need to enable clocks with runtime PM api to access module
3522 	 * registers
3523 	 */
3524 	ret = pm_runtime_get_sync(&pdev->dev);
3525 	if (ret < 0) {
3526 		pm_runtime_put_noidle(&pdev->dev);
3527 		goto clean_runtime_disable_ret;
3528 	}
3529 
3530 	ret = cpsw_probe_dt(&cpsw->data, pdev);
3531 	if (ret)
3532 		goto clean_dt_ret;
3533 
3534 	data = &cpsw->data;
3535 	cpsw->rx_ch_num = 1;
3536 	cpsw->tx_ch_num = 1;
3537 
3538 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
3539 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
3540 		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
3541 	} else {
3542 		eth_random_addr(priv->mac_addr);
3543 		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
3544 	}
3545 
3546 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
3547 
3548 	cpsw->slaves = devm_kcalloc(&pdev->dev,
3549 				    data->slaves, sizeof(struct cpsw_slave),
3550 				    GFP_KERNEL);
3551 	if (!cpsw->slaves) {
3552 		ret = -ENOMEM;
3553 		goto clean_dt_ret;
3554 	}
3555 	for (i = 0; i < data->slaves; i++)
3556 		cpsw->slaves[i].slave_num = i;
3557 
3558 	cpsw->slaves[0].ndev = ndev;
3559 	priv->emac_port = 0;
3560 
3561 	clk = devm_clk_get(&pdev->dev, "fck");
3562 	if (IS_ERR(clk)) {
3563 		dev_err(priv->dev, "fck is not found\n");
3564 		ret = -ENODEV;
3565 		goto clean_dt_ret;
3566 	}
3567 	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
3568 
3569 	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3570 	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
3571 	if (IS_ERR(ss_regs)) {
3572 		ret = PTR_ERR(ss_regs);
3573 		goto clean_dt_ret;
3574 	}
3575 	cpsw->regs = ss_regs;
3576 
3577 	cpsw->version = readl(&cpsw->regs->id_ver);
3578 
3579 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3580 	cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
3581 	if (IS_ERR(cpsw->wr_regs)) {
3582 		ret = PTR_ERR(cpsw->wr_regs);
3583 		goto clean_dt_ret;
3584 	}
3585 
3586 	memset(&dma_params, 0, sizeof(dma_params));
3587 	memset(&ale_params, 0, sizeof(ale_params));
3588 
3589 	switch (cpsw->version) {
3590 	case CPSW_VERSION_1:
3591 		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
3592 		cpts_regs		= ss_regs + CPSW1_CPTS_OFFSET;
3593 		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
3594 		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
3595 		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
3596 		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
3597 		slave_offset         = CPSW1_SLAVE_OFFSET;
3598 		slave_size           = CPSW1_SLAVE_SIZE;
3599 		sliver_offset        = CPSW1_SLIVER_OFFSET;
3600 		dma_params.desc_mem_phys = 0;
3601 		break;
3602 	case CPSW_VERSION_2:
3603 	case CPSW_VERSION_3:
3604 	case CPSW_VERSION_4:
3605 		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
3606 		cpts_regs		= ss_regs + CPSW2_CPTS_OFFSET;
3607 		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
3608 		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
3609 		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
3610 		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
3611 		slave_offset         = CPSW2_SLAVE_OFFSET;
3612 		slave_size           = CPSW2_SLAVE_SIZE;
3613 		sliver_offset        = CPSW2_SLIVER_OFFSET;
3614 		dma_params.desc_mem_phys =
3615 			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
3616 		break;
3617 	default:
3618 		dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
3619 		ret = -ENODEV;
3620 		goto clean_dt_ret;
3621 	}
3622 	for (i = 0; i < cpsw->data.slaves; i++) {
3623 		struct cpsw_slave *slave = &cpsw->slaves[i];
3624 
3625 		cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
3626 		slave_offset  += slave_size;
3627 		sliver_offset += SLIVER_SIZE;
3628 	}
3629 
3630 	dma_params.dev		= &pdev->dev;
3631 	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
3632 	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
3633 	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
3634 	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
3635 	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;
3636 
3637 	dma_params.num_chan		= data->channels;
3638 	dma_params.has_soft_reset	= true;
3639 	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
3640 	dma_params.desc_mem_size	= data->bd_ram_size;
3641 	dma_params.desc_align		= 16;
3642 	dma_params.has_ext_regs		= true;
3643 	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
3644 	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
3645 	dma_params.descs_pool_size	= descs_pool_size;
3646 
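	/* cpdma_ctlr_create() only snapshots the parameters above (register
	 * bases, descriptor pool placement/size, channel count) and builds
	 * the controller handle; the DMA engine itself is not started until
	 * cpdma_ctlr_start() runs in the ndo_open path.
	 */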
3647 	cpsw->dma = cpdma_ctlr_create(&dma_params);
3648 	if (!cpsw->dma) {
3649 		dev_err(priv->dev, "error initializing dma\n");
3650 		ret = -ENOMEM;
3651 		goto clean_dt_ret;
3652 	}
3653 
3654 	soc = soc_device_match(cpsw_soc_devices);
3655 	if (soc)
3656 		cpsw->quirk_irq = 1;
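	/* On SoCs matched by cpsw_soc_devices the per-channel interrupt
	 * handling is unreliable, so the driver restricts itself to a
	 * single RX/TX channel pair and the plain (non-multiqueue) NAPI
	 * pollers registered further down.
	 */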
3657 
3658 	ch = cpsw->quirk_irq ? 0 : 7;
3659 	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
3660 	if (IS_ERR(cpsw->txv[0].ch)) {
3661 		dev_err(priv->dev, "error initializing tx dma channel\n");
3662 		ret = PTR_ERR(cpsw->txv[0].ch);
3663 		goto clean_dma_ret;
3664 	}
3665 
3666 	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
3667 	if (IS_ERR(cpsw->rxv[0].ch)) {
3668 		dev_err(priv->dev, "error initializing rx dma channel\n");
3669 		ret = PTR_ERR(cpsw->rxv[0].ch);
3670 		goto clean_dma_ret;
3671 	}
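	/* The last cpdma_chan_create() argument selects the direction:
	 * 0 creates a TX channel serviced by cpsw_tx_handler, 1 an RX
	 * channel serviced by cpsw_rx_handler.
	 */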
3672 
3673 	ale_params.dev			= &pdev->dev;
3674 	ale_params.ale_ageout		= ale_ageout;
3675 	ale_params.ale_entries		= data->ale_entries;
3676 	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
3677 
3678 	cpsw->ale = cpsw_ale_create(&ale_params);
3679 	if (!cpsw->ale) {
3680 		dev_err(priv->dev, "error initializing ale engine\n");
3681 		ret = -ENODEV;
3682 		goto clean_dma_ret;
3683 	}
3684 
3685 	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
3686 	if (IS_ERR(cpsw->cpts)) {
3687 		ret = PTR_ERR(cpsw->cpts);
3688 		goto clean_dma_ret;
3689 	}
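	/* CPTS provides hardware packet timestamping (PTP); cpts_create()
	 * maps it at the version-specific cpts_regs offset and parses its
	 * clock properties from the same DT node.
	 */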
3690 
3691 	ndev->irq = platform_get_irq(pdev, 1);
3692 	if (ndev->irq < 0) {
3693 		dev_err(priv->dev, "error getting irq resource\n");
3694 		ret = ndev->irq;
3695 		goto clean_dma_ret;
3696 	}
3697 
3698 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
3699 
3700 	ndev->netdev_ops = &cpsw_netdev_ops;
3701 	ndev->ethtool_ops = &cpsw_ethtool_ops;
3702 	netif_napi_add(ndev, &cpsw->napi_rx,
3703 		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
3704 		       CPSW_POLL_WEIGHT);
3705 	netif_tx_napi_add(ndev, &cpsw->napi_tx,
3706 			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
3707 			  CPSW_POLL_WEIGHT);
3708 	cpsw_split_res(ndev);
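	/* cpsw_split_res() divides the total NAPI budget between the
	 * active channels so that one busy queue cannot starve the rest.
	 */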
3709 
3710 	/* register the network device */
3711 	SET_NETDEV_DEV(ndev, &pdev->dev);
3712 	ret = register_netdev(ndev);
3713 	if (ret) {
3714 		dev_err(priv->dev, "error registering net device\n");
3716 		goto clean_dma_ret;
3717 	}
3718 
3719 	if (cpsw->data.dual_emac) {
3720 		ret = cpsw_probe_dual_emac(priv);
3721 		if (ret) {
3722 			cpsw_err(priv, probe, "error probing slave 2 emac interface\n");
3723 			goto clean_unregister_netdev_ret;
3724 		}
3725 	}
3726 
3727 	/* Grab the RX and TX IRQs. Note that we also have RX_THRESHOLD and
3728 	 * MISC IRQs, which this driver always keeps disabled, so we do not
3729 	 * request them.
3730 	 *
3731 	 * If anyone wants to implement support for those, make sure to
3732 	 * first request them and append them to the irqs_table array.
3733 	 */
3734 
3735 	/* RX IRQ */
3736 	irq = platform_get_irq(pdev, 1);
3737 	if (irq < 0) {
3738 		ret = irq;
3739 		goto clean_unregister_netdev_ret;
3740 	}
3741 
3742 	cpsw->irqs_table[0] = irq;
3743 	ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
3744 			       0, dev_name(&pdev->dev), cpsw);
3745 	if (ret < 0) {
3746 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
3747 		goto clean_unregister_netdev_ret;
3748 	}
3749 
3750 	/* TX IRQ */
3751 	irq = platform_get_irq(pdev, 2);
3752 	if (irq < 0) {
3753 		ret = irq;
3754 		goto clean_unregister_netdev_ret;
3755 	}
3756 
3757 	cpsw->irqs_table[1] = irq;
3758 	ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
3759 			       0, dev_name(&pdev->dev), cpsw);
3760 	if (ret < 0) {
3761 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
3762 		goto clean_unregister_netdev_ret;
3763 	}
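	/* Per the CPSW DT binding the four interrupts are, in order,
	 * RX_THRESH (0), RX (1), TX (2) and MISC (3), which is why the two
	 * requests above use indices 1 and 2.
	 */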
3764 
3765 	cpsw_notice(priv, probe,
3766 		    "initialized device (regs %pa, irq %d, pool size %d)\n",
3767 		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);
3768 
3769 	pm_runtime_put(&pdev->dev);
3770 
3771 	return 0;
3772 
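	/* The labels below unwind in reverse order of construction; each
	 * one releases a single resource and falls through to the next, so
	 * every failure site jumps to the first label whose resource has
	 * already been set up.
	 */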
3773 clean_unregister_netdev_ret:
3774 	unregister_netdev(ndev);
3775 clean_dma_ret:
3776 	cpdma_ctlr_destroy(cpsw->dma);
3777 clean_dt_ret:
3778 	cpsw_remove_dt(pdev);
3779 	pm_runtime_put_sync(&pdev->dev);
3780 clean_runtime_disable_ret:
3781 	pm_runtime_disable(&pdev->dev);
3782 clean_ndev_ret:
3783 	free_netdev(priv->ndev);
3784 	return ret;
3785 }
3786 
3787 static int cpsw_remove(struct platform_device *pdev)
3788 {
3789 	struct net_device *ndev = platform_get_drvdata(pdev);
3790 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3791 	int ret;
3792 
3793 	ret = pm_runtime_get_sync(&pdev->dev);
3794 	if (ret < 0) {
3795 		pm_runtime_put_noidle(&pdev->dev);
3796 		return ret;
3797 	}
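	/* Teardown mirrors probe in reverse; the runtime-PM reference taken
	 * above keeps the hardware powered while the unregister paths below
	 * touch switch registers.
	 */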
3798 
3799 	if (cpsw->data.dual_emac)
3800 		unregister_netdev(cpsw->slaves[1].ndev);
3801 	unregister_netdev(ndev);
3802 
3803 	cpts_release(cpsw->cpts);
3804 	cpdma_ctlr_destroy(cpsw->dma);
3805 	cpsw_remove_dt(pdev);
3806 	pm_runtime_put_sync(&pdev->dev);
3807 	pm_runtime_disable(&pdev->dev);
3808 	if (cpsw->data.dual_emac)
3809 		free_netdev(cpsw->slaves[1].ndev);
3810 	free_netdev(ndev);
3811 	return 0;
3812 }
3813 
3814 #ifdef CONFIG_PM_SLEEP
3815 static int cpsw_suspend(struct device *dev)
3816 {
3817 	struct net_device	*ndev = dev_get_drvdata(dev);
3818 	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
3819 
3820 	if (cpsw->data.dual_emac) {
3821 		int i;
3822 
3823 		for (i = 0; i < cpsw->data.slaves; i++) {
3824 			if (netif_running(cpsw->slaves[i].ndev))
3825 				cpsw_ndo_stop(cpsw->slaves[i].ndev);
3826 		}
3827 	} else {
3828 		if (netif_running(ndev))
3829 			cpsw_ndo_stop(ndev);
3830 	}
3831 
3832 	/* Select sleep pin state */
3833 	pinctrl_pm_select_sleep_state(dev);
3834 
3835 	return 0;
3836 }
3837 
3838 static int cpsw_resume(struct device *dev)
3839 {
3840 	struct net_device	*ndev = dev_get_drvdata(dev);
3841 	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
3842 
3843 	/* Select default pin state */
3844 	pinctrl_pm_select_default_state(dev);
3845 
3846 	/* hold RTNL to quiet ASSERT_RTNL() in netif_set_real_num_tx/rx_queues */
3847 	rtnl_lock();
3848 	if (cpsw->data.dual_emac) {
3849 		int i;
3850 
3851 		for (i = 0; i < cpsw->data.slaves; i++) {
3852 			if (netif_running(cpsw->slaves[i].ndev))
3853 				cpsw_ndo_open(cpsw->slaves[i].ndev);
3854 		}
3855 	} else {
3856 		if (netif_running(ndev))
3857 			cpsw_ndo_open(ndev);
3858 	}
3859 	rtnl_unlock();
3860 
3861 	return 0;
3862 }
3863 #endif
3864 
3865 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
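/* SIMPLE_DEV_PM_OPS() fills in only the system-sleep callbacks, matching
 * the CONFIG_PM_SLEEP guard around cpsw_suspend()/cpsw_resume() above;
 * without CONFIG_PM_SLEEP both slots stay NULL.
 */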
3866 
3867 static struct platform_driver cpsw_driver = {
3868 	.driver = {
3869 		.name	 = "cpsw",
3870 		.pm	 = &cpsw_pm_ops,
3871 		.of_match_table = cpsw_of_mtable,
3872 	},
3873 	.probe = cpsw_probe,
3874 	.remove = cpsw_remove,
3875 };
3876 
3877 module_platform_driver(cpsw_driver);
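/* module_platform_driver() expands to roughly the following init/exit
 * boilerplate, registering cpsw_driver on load and removing it on unload:
 *
 *	static int __init cpsw_driver_init(void)
 *	{
 *		return platform_driver_register(&cpsw_driver);
 *	}
 *	module_init(cpsw_driver_init);
 *
 *	static void __exit cpsw_driver_exit(void)
 *	{
 *		platform_driver_unregister(&cpsw_driver);
 *	}
 *	module_exit(cpsw_driver_exit);
 */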
3878 
3879 MODULE_LICENSE("GPL");
3880 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
3881 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
3882 MODULE_DESCRIPTION("TI CPSW Ethernet driver");
3883