xref: /openbmc/linux/drivers/net/ethernet/ti/cpsw.c (revision 3c6a73cc)
1 /*
2  * Texas Instruments Ethernet Switch Driver
3  *
4  * Copyright (C) 2012 Texas Instruments
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/io.h>
18 #include <linux/clk.h>
19 #include <linux/timer.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/irqreturn.h>
23 #include <linux/interrupt.h>
24 #include <linux/if_ether.h>
25 #include <linux/etherdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/net_tstamp.h>
28 #include <linux/phy.h>
29 #include <linux/workqueue.h>
30 #include <linux/delay.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/of.h>
33 #include <linux/of_net.h>
34 #include <linux/of_device.h>
35 #include <linux/if_vlan.h>
36 #include <linux/mfd/syscon.h>
37 #include <linux/regmap.h>
38 
39 #include <linux/pinctrl/consumer.h>
40 
41 #include "cpsw.h"
42 #include "cpsw_ale.h"
43 #include "cpts.h"
44 #include "davinci_cpdma.h"
45 
46 #define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
47 			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
48 			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
49 			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
50 			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
51 			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
52 			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
53 			 NETIF_MSG_RX_STATUS)
54 
55 #define cpsw_info(priv, type, format, ...)		\
56 do {								\
57 	if (netif_msg_##type(priv) && net_ratelimit())		\
58 		dev_info(priv->dev, format, ## __VA_ARGS__);	\
59 } while (0)
60 
61 #define cpsw_err(priv, type, format, ...)		\
62 do {								\
63 	if (netif_msg_##type(priv) && net_ratelimit())		\
64 		dev_err(priv->dev, format, ## __VA_ARGS__);	\
65 } while (0)
66 
67 #define cpsw_dbg(priv, type, format, ...)		\
68 do {								\
69 	if (netif_msg_##type(priv) && net_ratelimit())		\
70 		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
71 } while (0)
72 
73 #define cpsw_notice(priv, type, format, ...)		\
74 do {								\
75 	if (netif_msg_##type(priv) && net_ratelimit())		\
76 		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
77 } while (0)
78 
79 #define ALE_ALL_PORTS		0x7
80 
81 #define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
82 #define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
83 #define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
84 
85 #define CPSW_VERSION_1		0x19010a
86 #define CPSW_VERSION_2		0x19010c
87 #define CPSW_VERSION_3		0x19010f
88 #define CPSW_VERSION_4		0x190112
89 
90 #define HOST_PORT_NUM		0
91 #define SLIVER_SIZE		0x40
92 
93 #define CPSW1_HOST_PORT_OFFSET	0x028
94 #define CPSW1_SLAVE_OFFSET	0x050
95 #define CPSW1_SLAVE_SIZE	0x040
96 #define CPSW1_CPDMA_OFFSET	0x100
97 #define CPSW1_STATERAM_OFFSET	0x200
98 #define CPSW1_HW_STATS		0x400
99 #define CPSW1_CPTS_OFFSET	0x500
100 #define CPSW1_ALE_OFFSET	0x600
101 #define CPSW1_SLIVER_OFFSET	0x700
102 
103 #define CPSW2_HOST_PORT_OFFSET	0x108
104 #define CPSW2_SLAVE_OFFSET	0x200
105 #define CPSW2_SLAVE_SIZE	0x100
106 #define CPSW2_CPDMA_OFFSET	0x800
107 #define CPSW2_HW_STATS		0x900
108 #define CPSW2_STATERAM_OFFSET	0xa00
109 #define CPSW2_CPTS_OFFSET	0xc00
110 #define CPSW2_ALE_OFFSET	0xd00
111 #define CPSW2_SLIVER_OFFSET	0xd80
112 #define CPSW2_BD_OFFSET		0x2000
113 
114 #define CPDMA_RXTHRESH		0x0c0
115 #define CPDMA_RXFREE		0x0e0
116 #define CPDMA_TXHDP		0x00
117 #define CPDMA_RXHDP		0x20
118 #define CPDMA_TXCP		0x40
119 #define CPDMA_RXCP		0x60
120 
121 #define CPSW_POLL_WEIGHT	64
122 #define CPSW_MIN_PACKET_SIZE	60
123 #define CPSW_MAX_PACKET_SIZE	(1500 + 14 + 4 + 4)
124 
125 #define RX_PRIORITY_MAPPING	0x76543210
126 #define TX_PRIORITY_MAPPING	0x33221100
127 #define CPDMA_TX_PRIORITY_MAP	0x76543210
128 
129 #define CPSW_VLAN_AWARE		BIT(1)
130 #define CPSW_ALE_VLAN_AWARE	1
131 
132 #define CPSW_FIFO_NORMAL_MODE		(0 << 15)
133 #define CPSW_FIFO_DUAL_MAC_MODE		(1 << 15)
134 #define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 15)
135 
136 #define CPSW_INTPACEEN		(0x3f << 16)
137 #define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
138 #define CPSW_CMINTMAX_CNT	63
139 #define CPSW_CMINTMIN_CNT	2
140 #define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
141 #define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)
142 
143 #define cpsw_enable_irq(priv)	\
144 	do {			\
145 		u32 i;		\
146 		for (i = 0; i < priv->num_irqs; i++) \
147 			enable_irq(priv->irqs_table[i]); \
148 	} while (0)
149 #define cpsw_disable_irq(priv)	\
150 	do {			\
151 		u32 i;		\
152 		for (i = 0; i < priv->num_irqs; i++) \
153 			disable_irq_nosync(priv->irqs_table[i]); \
154 	} while (0)
155 
156 #define cpsw_slave_index(priv)				\
157 		((priv->data.dual_emac) ? priv->emac_port :	\
158 		priv->data.active_slave)
159 
160 static int debug_level;
161 module_param(debug_level, int, 0);
162 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
163 
164 static int ale_ageout = 10;
165 module_param(ale_ageout, int, 0);
166 MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
167 
168 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
169 module_param(rx_packet_max, int, 0);
170 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
171 
/* CPSW wrapper (WR) module register layout: interrupt enable/status
 * registers and the rx/tx interrupt pacing (imax) registers written
 * by cpsw_set_coalesce().
 */
struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;		/* rx interrupt enable, per channel bit */
	u32	tx_en;		/* tx interrupt enable, per channel bit */
	u32	misc_en;
	u32	mem_allign1[8];	/* reserved / padding up to the status regs */
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];	/* reserved / padding */
	u32	rx_imax;	/* rx interrupt pacing limit (see cpsw_set_coalesce) */
	u32	tx_imax;	/* tx interrupt pacing limit (see cpsw_set_coalesce) */

};
191 
/* CPSW switch subsystem (SS) register layout; `control` carries the
 * CPSW_VLAN_AWARE bit, `soft_reset` is used with soft_reset().
 */
struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;	/* per-port statistics enable */
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};
207 
208 /* CPSW_PORT_V1 */
209 #define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
210 #define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
211 #define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
212 #define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
213 #define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
214 #define CPSW1_TS_CTL        0x14 /* Time Sync Control */
215 #define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
216 #define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */
217 
218 /* CPSW_PORT_V2 */
219 #define CPSW2_CONTROL       0x00 /* Control Register */
220 #define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
221 #define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
222 #define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
223 #define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
224 #define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
225 #define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */
226 
227 /* CPSW_PORT_V1 and V2 */
228 #define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
229 #define SA_HI               0x24 /* CPGMAC_SL Source Address High */
230 #define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */
231 
232 /* CPSW_PORT_V2 only */
233 #define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
234 #define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
235 #define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
236 #define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
237 #define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
238 #define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
239 #define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
240 #define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */
241 
242 /* Bit definitions for the CPSW2_CONTROL register */
243 #define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
244 #define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
245 #define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
246 #define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
247 #define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
248 #define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
249 #define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
250 #define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
251 #define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
252 #define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
253 #define TS_TTL_NONZERO      (1<<8)  /* Time Sync Time To Live Non-zero enable */
254 #define TS_ANNEX_F_EN       (1<<6)  /* Time Sync Annex F enable */
255 #define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
256 #define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
257 #define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
258 #define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
259 #define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */
260 
261 #define CTRL_V2_TS_BITS \
262 	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
263 	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)
264 
265 #define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
266 #define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
267 #define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)
268 
269 
270 #define CTRL_V3_TS_BITS \
271 	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
272 	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
273 	 TS_LTYPE1_EN)
274 
275 #define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
276 #define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
277 #define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)
278 
279 /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
280 #define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
281 #define TS_SEQ_ID_OFFSET_MASK    (0x3f)
282 #define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
283 #define TS_MSG_TYPE_EN_MASK      (0xffff)
284 
285 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
286 #define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
287 
288 /* Bit definitions for the CPSW1_TS_CTL register */
289 #define CPSW_V1_TS_RX_EN		BIT(0)
290 #define CPSW_V1_TS_TX_EN		BIT(4)
291 #define CPSW_V1_MSG_TYPE_OFS		16
292 
293 /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
294 #define CPSW_V1_SEQ_ID_OFS_SHIFT	16
295 
/* Host (port 0) port register layout. */
struct cpsw_host_regs {
	u32	max_blks;	/* maximum FIFO blocks */
	u32	blk_cnt;	/* FIFO block usage count (read only) */
	u32	tx_in_ctl;	/* transmit FIFO control */
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};
305 
/* Per-slave CPGMAC_SL ("sliver") MAC register layout; mac_control is
 * programmed from _cpsw_adjust_link() on PHY state changes.
 */
struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;	/* maximum receive frame length */
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};
318 
/* Hardware statistics module layout; counters are read 32 bits at a
 * time via priv->hw_stats in cpsw_get_ethtool_stats(), addressed by
 * the offsets baked into cpsw_gstrings_stats (CPSW_STAT()).
 */
struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];	/* reserved */
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};
356 
/* Per slave-port runtime state. */
struct cpsw_slave {
	void __iomem			*regs;		/* per-port register window (slave_read/write) */
	struct cpsw_sliver_regs __iomem	*sliver;	/* MAC sliver registers */
	int				slave_num;	/* index; mapped to a port via cpsw_get_slave_port() */
	u32				mac_control;	/* last value written to sliver->mac_control */
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;	/* port VLAN used for dual-EMAC separation */
	u32				open_stat;
};
368 
/* Read a 32-bit register from the slave's per-port window at byte
 * @offset (e.g. SA_HI, CPSW2_CONTROL).
 */
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return __raw_readl(slave->regs + offset);
}
373 
/* Write a 32-bit register in the slave's per-port window at byte
 * @offset.
 */
static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	__raw_writel(val, slave->regs + offset);
}
378 
/* Driver private state, one instance per net_device (one shared
 * device in switch mode, one per slave in dual-EMAC mode).
 */
struct cpsw_priv {
	spinlock_t			lock;
	struct platform_device		*pdev;
	struct net_device		*ndev;
	struct napi_struct		napi;
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct cpsw_ss_regs __iomem	*regs;		/* switch subsystem registers */
	struct cpsw_wr_regs __iomem	*wr_regs;	/* wrapper/interrupt registers */
	u8 __iomem			*hw_stats;	/* hardware statistics block */
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				msg_enable;	/* NETIF_MSG_* bitmask */
	u32				version;	/* CPSW_VERSION_* from id register */
	u32				coal_intvl;	/* current irq coalesce interval (usecs) */
	u32				bus_freq_mhz;	/* used to derive the pacer prescale */
	int				rx_packet_max;	/* rx buffer allocation size */
	int				host_port;	/* host port number (0 or 1) */
	struct clk			*clk;
	u8				mac_addr[ETH_ALEN];
	struct cpsw_slave		*slaves;	/* array of data.slaves entries */
	struct cpdma_ctlr		*dma;
	struct cpdma_chan		*txch, *rxch;
	struct cpsw_ale			*ale;
	bool				rx_pause;	/* rx pause frames enabled */
	bool				tx_pause;	/* tx pause frames enabled */
	/* snapshot of IRQ numbers */
	u32 irqs_table[4];
	u32 num_irqs;
	bool irq_enabled;	/* authoritative copy lives on slave 0's priv (see cpsw_poll) */
	struct cpts *cpts;	/* time sync (PTP) context */
	u32 emac_port;		/* this interface's slave index in dual-EMAC mode */
};
411 
/* Descriptor for one ethtool statistic: display name, which source it
 * comes from (type below) and the field's offset/size in that source
 * struct.
 */
struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;		/* CPSW_STATS / CPDMA_RX_STATS / CPDMA_TX_STATS */
	int sizeof_stat;
	int stat_offset;
};

/* Statistic sources: hardware stats block, rx CPDMA chan, tx CPDMA chan. */
enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};
424 
425 #define CPSW_STAT(m)		CPSW_STATS,				\
426 				sizeof(((struct cpsw_hw_stats *)0)->m), \
427 				offsetof(struct cpsw_hw_stats, m)
428 #define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
429 				sizeof(((struct cpdma_chan_stats *)0)->m), \
430 				offsetof(struct cpdma_chan_stats, m)
431 #define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
432 				sizeof(((struct cpdma_chan_stats *)0)->m), \
433 				offsetof(struct cpdma_chan_stats, m)
434 
435 static const struct cpsw_stats cpsw_gstrings_stats[] = {
436 	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
437 	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
438 	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
439 	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
440 	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
441 	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
442 	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
443 	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
444 	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
445 	{ "Rx Fragments", CPSW_STAT(rxfragments) },
446 	{ "Rx Octets", CPSW_STAT(rxoctets) },
447 	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
448 	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
449 	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
450 	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
451 	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
452 	{ "Collisions", CPSW_STAT(txcollisionframes) },
453 	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
454 	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
455 	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
456 	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
457 	{ "Tx Underrun", CPSW_STAT(txunderrun) },
458 	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
459 	{ "Tx Octets", CPSW_STAT(txoctets) },
460 	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
461 	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
462 	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
463 	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
464 	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
465 	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
466 	{ "Net Octets", CPSW_STAT(netoctets) },
467 	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
468 	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
469 	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
470 	{ "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
471 	{ "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
472 	{ "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
473 	{ "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
474 	{ "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
475 	{ "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
476 	{ "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
477 	{ "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
478 	{ "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
479 	{ "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
480 	{ "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
481 	{ "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
482 	{ "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
483 	{ "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
484 	{ "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
485 	{ "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
486 	{ "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
487 	{ "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
488 	{ "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
489 	{ "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
490 	{ "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
491 	{ "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
492 	{ "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
493 	{ "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
494 	{ "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
495 	{ "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
496 };
497 
498 #define CPSW_STATS_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
499 
/* Recover the cpsw_priv that embeds a given napi_struct. */
#define napi_to_priv(napi)	container_of(napi, struct cpsw_priv, napi)
/* Invoke func(slave, arg...) on the slave(s) owned by @priv: only its
 * own port in dual-EMAC mode, every slave in switch mode.
 */
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		int n;							\
		if (priv->data.dual_emac)				\
			(func)((priv)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = (priv)->data.slaves,			\
					slave = (priv)->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)
/* Look up a slave's net_device, or its netdev_priv (NULL when the
 * slave has no net_device), by slave index.
 */
#define cpsw_get_slave_ndev(priv, __slave_no__)				\
	(priv->slaves[__slave_no__].ndev)
#define cpsw_get_slave_priv(priv, __slave_no__)				\
	((priv->slaves[__slave_no__].ndev) ?				\
		netdev_priv(priv->slaves[__slave_no__].ndev) : NULL)	\

/* Dual-EMAC rx path: re-point ndev/priv/skb->dev at the slave
 * interface matching the source port encoded in the CPDMA status
 * word (no-op in switch mode).
 */
#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb)		\
	do {								\
		if (!priv->data.dual_emac)				\
			break;						\
		if (CPDMA_RX_SOURCE_PORT(status) == 1) {		\
			ndev = cpsw_get_slave_ndev(priv, 0);		\
			priv = netdev_priv(ndev);			\
			skb->dev = ndev;				\
		} else if (CPDMA_RX_SOURCE_PORT(status) == 2) {		\
			ndev = cpsw_get_slave_ndev(priv, 1);		\
			priv = netdev_priv(ndev);			\
			skb->dev = ndev;				\
		}							\
	} while (0)
/* Add a multicast address to the ALE: limited to this slave's port
 * and port VLAN in dual-EMAC mode, all ports in switch mode.
 */
#define cpsw_add_mcast(priv, addr)					\
	do {								\
		if (priv->data.dual_emac) {				\
			struct cpsw_slave *slave = priv->slaves +	\
						priv->emac_port;	\
			int slave_port = cpsw_get_slave_port(priv,	\
						slave->slave_num);	\
			cpsw_ale_add_mcast(priv->ale, addr,		\
				1 << slave_port | 1 << priv->host_port,	\
				ALE_VLAN, slave->port_vlan, 0);		\
		} else {						\
			cpsw_ale_add_mcast(priv->ale, addr,		\
				ALE_ALL_PORTS << priv->host_port,	\
				0, 0, 0);				\
		}							\
	} while (0)
549 
550 static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
551 {
552 	if (priv->host_port == 0)
553 		return slave_num + 1;
554 	else
555 		return slave_num;
556 }
557 
/* Switch the hardware into/out of promiscuous mode.
 *
 * Dual-EMAC mode: the ALE is shared by both interfaces, so promiscuity
 * is implemented with the ALE bypass bit and is kept enabled while
 * either interface still has IFF_PROMISC set.
 *
 * Switch mode: promiscuity is emulated by disabling address learning,
 * ageing out the ALE table and flooding unknown unicast to the host
 * port.
 */
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_ale *ale = priv->ale;
	int i;

	if (priv->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface will be
		 * common for both the interface as the interface shares
		 * the same hardware resource.
		 */
		for (i = 0; i < priv->data.slaves; i++)
			if (priv->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports */
			/* NOTE(review): the loop covers indices
			 * 0..slaves-1 while slave ports start at 1 when
			 * the host is port 0 — confirm the intended set
			 * of ALE ports.
			 */
			for (i = 0; i < priv->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				/* poll (up to 1s) until the ALE reports
				 * the ageout operation has finished
				 */
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
						 priv->host_port);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports */
			for (i = 0; i < priv->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}
633 
634 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
635 {
636 	struct cpsw_priv *priv = netdev_priv(ndev);
637 
638 	if (ndev->flags & IFF_PROMISC) {
639 		/* Enable promiscuous mode */
640 		cpsw_set_promiscious(ndev, true);
641 		return;
642 	} else {
643 		/* Disable promiscuous mode */
644 		cpsw_set_promiscious(ndev, false);
645 	}
646 
647 	/* Clear all mcast from ALE */
648 	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
649 
650 	if (!netdev_mc_empty(ndev)) {
651 		struct netdev_hw_addr *ha;
652 
653 		/* program multicast address list into ALE register */
654 		netdev_for_each_mc_addr(ha, ndev) {
655 			cpsw_add_mcast(priv, (u8 *)ha->addr);
656 		}
657 	}
658 }
659 
660 static void cpsw_intr_enable(struct cpsw_priv *priv)
661 {
662 	__raw_writel(0xFF, &priv->wr_regs->tx_en);
663 	__raw_writel(0xFF, &priv->wr_regs->rx_en);
664 
665 	cpdma_ctlr_int_ctrl(priv->dma, true);
666 	return;
667 }
668 
669 static void cpsw_intr_disable(struct cpsw_priv *priv)
670 {
671 	__raw_writel(0, &priv->wr_regs->tx_en);
672 	__raw_writel(0, &priv->wr_regs->rx_en);
673 
674 	cpdma_ctlr_int_ctrl(priv->dma, false);
675 	return;
676 }
677 
678 static void cpsw_tx_handler(void *token, int len, int status)
679 {
680 	struct sk_buff		*skb = token;
681 	struct net_device	*ndev = skb->dev;
682 	struct cpsw_priv	*priv = netdev_priv(ndev);
683 
684 	/* Check whether the queue is stopped due to stalled tx dma, if the
685 	 * queue is stopped then start the queue as we have free desc for tx
686 	 */
687 	if (unlikely(netif_queue_stopped(ndev)))
688 		netif_wake_queue(ndev);
689 	cpts_tx_timestamp(priv->cpts, skb);
690 	ndev->stats.tx_packets++;
691 	ndev->stats.tx_bytes += len;
692 	dev_kfree_skb_any(skb);
693 }
694 
/* CPDMA rx completion callback.
 *
 * Normal path: hand the skb up the stack and submit a freshly
 * allocated replacement buffer so the rx ring stays full; if the
 * allocation fails the received skb itself is recycled (the packet is
 * dropped).
 *
 * Error / interface-down path: in dual-EMAC mode the buffer is
 * requeued while the other interface is still running (the rx
 * descriptor pool is shared); otherwise the skb is freed.
 */
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	struct cpsw_priv	*priv = netdev_priv(ndev);
	int			ret = 0;

	/* dual-EMAC: redirect ndev/priv/skb->dev to the slave interface
	 * identified by the source port in the status word
	 */
	cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		bool ndev_status = false;
		struct cpsw_slave *slave = priv->slaves;
		int n;

		if (priv->data.dual_emac) {
			/* In dual emac mode check for all interfaces */
			for (n = priv->data.slaves; n; n--, slave++)
				if (netif_running(slave->ndev))
					ndev_status = true;
		}

		if (ndev_status && (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, intead of freeing which results
			 * in reducing of the number of rx descriptor in
			 * DMA engine, requeue skb back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
	if (new_skb) {
		skb_put(skb, len);
		cpts_rx_timestamp(priv->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
	} else {
		/* out of memory: drop the packet, recycle its buffer */
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
			skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}
752 
753 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
754 {
755 	struct cpsw_priv *priv = dev_id;
756 
757 	cpsw_intr_disable(priv);
758 	if (priv->irq_enabled == true) {
759 		cpsw_disable_irq(priv);
760 		priv->irq_enabled = false;
761 	}
762 
763 	if (netif_running(priv->ndev)) {
764 		napi_schedule(&priv->napi);
765 		return IRQ_HANDLED;
766 	}
767 
768 	priv = cpsw_get_slave_priv(priv, 1);
769 	if (!priv)
770 		return IRQ_NONE;
771 
772 	if (netif_running(priv->ndev)) {
773 		napi_schedule(&priv->napi);
774 		return IRQ_HANDLED;
775 	}
776 	return IRQ_NONE;
777 }
778 
/* NAPI poll handler: drain tx completions (bounded at 128 per poll)
 * and up to @budget rx packets; when rx work is exhausted, complete
 * NAPI and re-enable hardware and line interrupts.
 */
static int cpsw_poll(struct napi_struct *napi, int budget)
{
	struct cpsw_priv	*priv = napi_to_priv(napi);
	int			num_tx, num_rx;

	num_tx = cpdma_chan_process(priv->txch, 128);
	if (num_tx)
		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

	num_rx = cpdma_chan_process(priv->rxch, budget);
	if (num_rx < budget) {
		struct cpsw_priv *prim_cpsw;

		napi_complete(napi);
		cpsw_intr_enable(priv);
		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
		/* irq_enabled is tracked on the first slave's priv.
		 * NOTE(review): prim_cpsw is dereferenced without a
		 * NULL check although cpsw_get_slave_priv() can return
		 * NULL — confirm slave 0 always has an ndev here.
		 */
		prim_cpsw = cpsw_get_slave_priv(priv, 0);
		if (prim_cpsw->irq_enabled == false) {
			prim_cpsw->irq_enabled = true;
			cpsw_enable_irq(priv);
		}
	}

	if (num_rx || num_tx)
		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
			 num_rx, num_tx);

	return num_rx;
}
808 
809 static inline void soft_reset(const char *module, void __iomem *reg)
810 {
811 	unsigned long timeout = jiffies + HZ;
812 
813 	__raw_writel(1, reg);
814 	do {
815 		cpu_relax();
816 	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
817 
818 	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
819 }
820 
/* Pack a 6-byte MAC address into the two register halves expected by
 * SA_HI (bytes 0-3) and SA_LO (bytes 4-5).
 */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

/* Program the slave port's source-address registers with the
 * interface MAC address.
 */
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
831 
/* Per-slave link-change worker, invoked via for_each_slave() from
 * cpsw_adjust_link().
 *
 * Translates the PHY state into a sliver MAC_CONTROL value and an ALE
 * port state (FORWARD on link-up, DISABLE on link-down), and sets
 * *link true when this slave's PHY reports link.  The register is
 * rewritten only when the computed value changed.
 */
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;

	/* slave has no attached PHY: nothing to adjust */
	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	if (phy->link) {
		/* start from the platform-provided base value */
		mac_control = priv->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN	*/
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		else if (phy->speed == 10)
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);	/* rx flow control */

		if (priv->tx_pause)
			mac_control |= BIT(4);	/* tx flow control */

		*link = true;
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}
883 
884 static void cpsw_adjust_link(struct net_device *ndev)
885 {
886 	struct cpsw_priv	*priv = netdev_priv(ndev);
887 	bool			link = false;
888 
889 	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
890 
891 	if (link) {
892 		netif_carrier_on(ndev);
893 		if (netif_running(ndev))
894 			netif_wake_queue(ndev);
895 	} else {
896 		netif_carrier_off(ndev);
897 		netif_stop_queue(ndev);
898 	}
899 }
900 
901 static int cpsw_get_coalesce(struct net_device *ndev,
902 				struct ethtool_coalesce *coal)
903 {
904 	struct cpsw_priv *priv = netdev_priv(ndev);
905 
906 	coal->rx_coalesce_usecs = priv->coal_intvl;
907 	return 0;
908 }
909 
/* ethtool set_coalesce: program the wrapper-module interrupt pacer.
 *
 * The pacer limits interrupts against rx_imax/tx_imax using a pulse
 * derived from the bus clock (prescale = bus_freq_mhz * 4); intervals
 * longer than CPSW_CMINTMAX_INTVL are reached by dilating that pulse
 * (addnl_dvdr).  rx_coalesce_usecs == 0 disables pacing entirely.
 */
static int cpsw_set_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl =  readl(&priv->wr_regs->int_control);
	prescale = priv->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		/* disable pacing: clear the enable and prescale fields */
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	/* clamp the requested interval to what the pacer can express */
	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			/* prescale field already saturated: cap instead */
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	/* interrupts permitted per millisecond at this interval */
	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &priv->wr_regs->rx_imax);
	writel(num_interrupts, &priv->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &priv->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	if (priv->data.dual_emac) {
		int i;

		/* the pacer is shared hardware: mirror the new interval
		 * into every slave interface's priv
		 */
		for (i = 0; i < priv->data.slaves; i++) {
			priv = netdev_priv(priv->slaves[i].ndev);
			priv->coal_intvl = coal_intvl;
		}
	} else {
		priv->coal_intvl = coal_intvl;
	}

	return 0;
}
975 
976 static int cpsw_get_sset_count(struct net_device *ndev, int sset)
977 {
978 	switch (sset) {
979 	case ETH_SS_STATS:
980 		return CPSW_STATS_LEN;
981 	default:
982 		return -EOPNOTSUPP;
983 	}
984 }
985 
986 static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
987 {
988 	u8 *p = data;
989 	int i;
990 
991 	switch (stringset) {
992 	case ETH_SS_STATS:
993 		for (i = 0; i < CPSW_STATS_LEN; i++) {
994 			memcpy(p, cpsw_gstrings_stats[i].stat_string,
995 			       ETH_GSTRING_LEN);
996 			p += ETH_GSTRING_LEN;
997 		}
998 		break;
999 	}
1000 }
1001 
1002 static void cpsw_get_ethtool_stats(struct net_device *ndev,
1003 				    struct ethtool_stats *stats, u64 *data)
1004 {
1005 	struct cpsw_priv *priv = netdev_priv(ndev);
1006 	struct cpdma_chan_stats rx_stats;
1007 	struct cpdma_chan_stats tx_stats;
1008 	u32 val;
1009 	u8 *p;
1010 	int i;
1011 
1012 	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
1013 	cpdma_chan_get_stats(priv->rxch, &rx_stats);
1014 	cpdma_chan_get_stats(priv->txch, &tx_stats);
1015 
1016 	for (i = 0; i < CPSW_STATS_LEN; i++) {
1017 		switch (cpsw_gstrings_stats[i].type) {
1018 		case CPSW_STATS:
1019 			val = readl(priv->hw_stats +
1020 				    cpsw_gstrings_stats[i].stat_offset);
1021 			data[i] = val;
1022 			break;
1023 
1024 		case CPDMA_RX_STATS:
1025 			p = (u8 *)&rx_stats +
1026 				cpsw_gstrings_stats[i].stat_offset;
1027 			data[i] = *(u32 *)p;
1028 			break;
1029 
1030 		case CPDMA_TX_STATS:
1031 			p = (u8 *)&tx_stats +
1032 				cpsw_gstrings_stats[i].stat_offset;
1033 			data[i] = *(u32 *)p;
1034 			break;
1035 		}
1036 	}
1037 }
1038 
1039 static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
1040 {
1041 	u32 i;
1042 	u32 usage_count = 0;
1043 
1044 	if (!priv->data.dual_emac)
1045 		return 0;
1046 
1047 	for (i = 0; i < priv->data.slaves; i++)
1048 		if (priv->slaves[i].open_stat)
1049 			usage_count++;
1050 
1051 	return usage_count;
1052 }
1053 
1054 static inline int cpsw_tx_packet_submit(struct net_device *ndev,
1055 			struct cpsw_priv *priv, struct sk_buff *skb)
1056 {
1057 	if (!priv->data.dual_emac)
1058 		return cpdma_chan_submit(priv->txch, skb, skb->data,
1059 				  skb->len, 0);
1060 
1061 	if (ndev == cpsw_get_slave_ndev(priv, 0))
1062 		return cpdma_chan_submit(priv->txch, skb, skb->data,
1063 				  skb->len, 1);
1064 	else
1065 		return cpdma_chan_submit(priv->txch, skb, skb->data,
1066 				  skb->len, 2);
1067 }
1068 
1069 static inline void cpsw_add_dual_emac_def_ale_entries(
1070 		struct cpsw_priv *priv, struct cpsw_slave *slave,
1071 		u32 slave_port)
1072 {
1073 	u32 port_mask = 1 << slave_port | 1 << priv->host_port;
1074 
1075 	if (priv->version == CPSW_VERSION_1)
1076 		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
1077 	else
1078 		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
1079 	cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
1080 			  port_mask, port_mask, 0);
1081 	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1082 			   port_mask, ALE_VLAN, slave->port_vlan, 0);
1083 	cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
1084 		priv->host_port, ALE_VLAN, slave->port_vlan);
1085 }
1086 
1087 static void soft_reset_slave(struct cpsw_slave *slave)
1088 {
1089 	char name[32];
1090 
1091 	snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1092 	soft_reset(name, &slave->sliver->soft_reset);
1093 }
1094 
/* Bring up one slave port: reset the sliver, program priority maps and
 * max packet size, install the default ALE entries for the port, and
 * connect + start its PHY.  PHY connect failure is logged but not fatal
 * (slave->phy is left NULL and the port simply never gets link).
 */
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	soft_reset_slave(slave);

	/* setup priority mapping */
	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);

	/* The TX priority map register moved between IP revisions */
	switch (priv->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		break;
	}

	/* setup max packet size, and mac address */
	__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	/* Dual-EMAC ports get isolating VLAN entries; in switch mode a
	 * plain broadcast mcast entry for this port is enough.
	 */
	if (priv->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	/* Attach the PHY described in platform data; link changes are
	 * reported through cpsw_adjust_link().
	 */
	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
	if (IS_ERR(slave->phy)) {
		dev_err(priv->dev, "phy %s not found on slave %d\n",
			slave->data->phy_id, slave->slave_num);
		slave->phy = NULL;
	} else {
		dev_info(priv->dev, "phy found : id is : 0x%x\n",
			 slave->phy->phy_id);
		phy_start(slave->phy);

		/* Configure GMII_SEL register */
		cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
			     slave->slave_num);
	}
}
1145 
1146 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1147 {
1148 	const int vlan = priv->data.default_vlan;
1149 	const int port = priv->host_port;
1150 	u32 reg;
1151 	int i;
1152 
1153 	reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
1154 	       CPSW2_PORT_VLAN;
1155 
1156 	writel(vlan, &priv->host_port_regs->port_vlan);
1157 
1158 	for (i = 0; i < priv->data.slaves; i++)
1159 		slave_write(priv->slaves + i, vlan, reg);
1160 
1161 	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
1162 			  ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
1163 			  (ALE_PORT_1 | ALE_PORT_2) << port);
1164 }
1165 
/* One-time host (CPU) port initialization: reset the switch core, start
 * the ALE, enable VLAN awareness, select the FIFO mode, program the
 * host priority maps and put the host port into forwarding state.
 * In switch mode, also install the host's default ucast/bcast entries.
 */
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 control_reg;
	u32 fifo_mode;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &priv->regs->soft_reset);
	cpsw_ale_start(priv->ale);

	/* switch to vlan unaware mode */
	cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&priv->regs->control);
	control_reg |= CPSW_VLAN_AWARE;
	writel(control_reg, &priv->regs->control);
	/* Dual-EMAC needs the dual-MAC FIFO mode to keep ports separate */
	fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	__raw_writel(CPDMA_TX_PRIORITY_MAP,
		     &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* In dual-EMAC mode these entries are added per-VLAN in
	 * cpsw_add_dual_emac_def_ale_entries() instead.
	 */
	if (!priv->data.dual_emac) {
		cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
				   0, 0);
		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
				   1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
	}
}
1200 
1201 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
1202 {
1203 	u32 slave_port;
1204 
1205 	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
1206 
1207 	if (!slave->phy)
1208 		return;
1209 	phy_stop(slave->phy);
1210 	phy_disconnect(slave->phy);
1211 	slave->phy = NULL;
1212 	cpsw_ale_control_set(priv->ale, slave_port,
1213 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1214 }
1215 
/* ndo_open: bring the interface up.
 *
 * In dual-EMAC mode the switch core, DMA and RX descriptors are shared
 * between the two slave net devices; cpsw_common_res_usage_state() is
 * used to initialize those shared resources only on the first open.
 * Returns 0 on success or a negative errno (RX skb allocation / submit
 * failure), in which case everything started here is torn down again.
 */
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_priv *prim_cpsw;
	int i, ret;
	u32 reg;

	/* First opener: keep interrupts off until setup is complete */
	if (!cpsw_common_res_usage_state(priv))
		cpsw_intr_disable(priv);
	netif_carrier_off(ndev);

	pm_runtime_get_sync(&priv->pdev->dev);

	reg = priv->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* initialize host and slave ports */
	if (!cpsw_common_res_usage_state(priv))
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!priv->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
				  ALE_ALL_PORTS << priv->host_port,
				  ALE_ALL_PORTS << priv->host_port, 0, 0);

	/* Shared one-time setup: DMA knobs, stats, flow control and the
	 * RX descriptor pool.
	 */
	if (!cpsw_common_res_usage_state(priv)) {
		/* setup tx dma to fixed prio and zero offset */
		cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
		cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);

		/* disable priority elevation */
		__raw_writel(0, &priv->regs->ptype);

		/* enable statistics collection only on all ports */
		__raw_writel(0x7, &priv->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &priv->regs->flow_control);

		if (WARN_ON(!priv->data.rx_descs))
			priv->data.rx_descs = 128;

		/* Pre-fill the RX channel with empty skbs */
		for (i = 0; i < priv->data.rx_descs; i++) {
			struct sk_buff *skb;

			ret = -ENOMEM;
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
					priv->rx_packet_max, GFP_KERNEL);
			if (!skb)
				goto err_cleanup;
			ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), 0);
			if (ret < 0) {
				kfree_skb(skb);
				goto err_cleanup;
			}
		}
		/* continue even if we didn't manage to submit all
		 * receive descs
		 */
		cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);

		if (cpts_register(&priv->pdev->dev, priv->cpts,
				  priv->data.cpts_clock_mult,
				  priv->data.cpts_clock_shift))
			dev_err(priv->dev, "error registering cpts device\n");

	}

	/* Enable Interrupt pacing if configured */
	if (priv->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
		cpsw_set_coalesce(ndev, &coal);
	}

	napi_enable(&priv->napi);
	cpdma_ctlr_start(priv->dma);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

	/* The IRQ is shared; only the primary device enables it, and only
	 * when it is safe to do so.
	 */
	prim_cpsw = cpsw_get_slave_priv(priv, 0);
	if (prim_cpsw->irq_enabled == false) {
		if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
			prim_cpsw->irq_enabled = true;
			cpsw_enable_irq(prim_cpsw);
		}
	}

	if (priv->data.dual_emac)
		priv->slaves[priv->emac_port].open_stat = true;
	return 0;

err_cleanup:
	cpdma_ctlr_stop(priv->dma);
	for_each_slave(priv, cpsw_slave_stop, priv);
	pm_runtime_put_sync(&priv->pdev->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}
1325 
/* ndo_stop: take the interface down.  Shared resources (cpts, DMA, ALE)
 * are stopped only when this is the last open device using them; the
 * slaves and runtime PM reference are always released.  Returns 0.
 */
static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	netif_stop_queue(priv->ndev);
	napi_disable(&priv->napi);
	netif_carrier_off(priv->ndev);

	/* <= 1: we are the last user of the shared switch resources */
	if (cpsw_common_res_usage_state(priv) <= 1) {
		cpts_unregister(priv->cpts);
		cpsw_intr_disable(priv);
		cpdma_ctlr_int_ctrl(priv->dma, false);
		cpdma_ctlr_stop(priv->dma);
		cpsw_ale_stop(priv->ale);
	}
	for_each_slave(priv, cpsw_slave_stop, priv);
	pm_runtime_put_sync(&priv->pdev->dev);
	if (priv->data.dual_emac)
		priv->slaves[priv->emac_port].open_stat = false;
	return 0;
}
1348 
/* ndo_start_xmit: pad, optionally HW-timestamp, and hand the skb to the
 * CPDMA TX channel.  Undersized frames that cannot be padded are
 * dropped (NETDEV_TX_OK); a failed descriptor submit stops the queue
 * and returns NETDEV_TX_BUSY so the stack retries the skb.
 */
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ndev->trans_start = jiffies;

	/* Hardware requires a minimum frame size; pad with zeros */
	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Mark the skb so the TX completion path fetches the CPTS stamp */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
				priv->cpts->tx_enable)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_tx_timestamp(skb);

	ret = cpsw_tx_packet_submit(ndev, priv, skb);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_stop_queue(ndev);
	return NETDEV_TX_BUSY;
}
1387 
1388 #ifdef CONFIG_TI_CPTS
1389 
1390 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
1391 {
1392 	struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
1393 	u32 ts_en, seq_id;
1394 
1395 	if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
1396 		slave_write(slave, 0, CPSW1_TS_CTL);
1397 		return;
1398 	}
1399 
1400 	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
1401 	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
1402 
1403 	if (priv->cpts->tx_enable)
1404 		ts_en |= CPSW_V1_TS_TX_EN;
1405 
1406 	if (priv->cpts->rx_enable)
1407 		ts_en |= CPSW_V1_TS_RX_EN;
1408 
1409 	slave_write(slave, ts_en, CPSW1_TS_CTL);
1410 	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
1411 }
1412 
/* Program the CPSW v2/v3 time-sync control registers of the relevant
 * slave port from the current cpts tx/rx enable state.  The TS bit
 * layout in CPSW2_CONTROL differs between IP revisions, hence the
 * version switch.
 */
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	/* Dual-EMAC: each net device timestamps on its own port */
	if (priv->data.dual_emac)
		slave = &priv->slaves[priv->emac_port];
	else
		slave = &priv->slaves[priv->data.active_slave];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (priv->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->cpts->tx_enable)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->cpts->rx_enable)
			ctrl |= CTRL_V2_RX_TS_BITS;
	break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->cpts->tx_enable)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->cpts->rx_enable)
			ctrl |= CTRL_V3_RX_TS_BITS;
	break;
	}

	/* PTP event messages only, sequence id at offset 30 */
	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
}
1452 
/* SIOCSHWTSTAMP: validate the user's hwtstamp_config, update the cpts
 * enable flags and reprogram the hardware for the running IP version.
 * Only PTP v2 event filtering is supported on RX; the accepted config
 * (with rx_filter coerced to PTP_V2_EVENT) is copied back to the user.
 */
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpts *cpts = priv->cpts;
	struct hwtstamp_config cfg;

	if (priv->version != CPSW_VERSION_1 &&
	    priv->version != CPSW_VERSION_2 &&
	    priv->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		cpts->rx_enable = 0;
		break;
	/* PTP v1 over UDP is not supported by this hardware */
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	/* All PTP v2 variants are upgraded to the generic v2 event filter */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		cpts->rx_enable = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Push the new enables into the version-specific registers */
	switch (priv->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
1515 
1516 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1517 {
1518 	struct cpsw_priv *priv = netdev_priv(dev);
1519 	struct cpts *cpts = priv->cpts;
1520 	struct hwtstamp_config cfg;
1521 
1522 	if (priv->version != CPSW_VERSION_1 &&
1523 	    priv->version != CPSW_VERSION_2 &&
1524 	    priv->version != CPSW_VERSION_3)
1525 		return -EOPNOTSUPP;
1526 
1527 	cfg.flags = 0;
1528 	cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1529 	cfg.rx_filter = (cpts->rx_enable ?
1530 			 HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
1531 
1532 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1533 }
1534 
1535 #endif /*CONFIG_TI_CPTS*/
1536 
1537 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1538 {
1539 	struct cpsw_priv *priv = netdev_priv(dev);
1540 	int slave_no = cpsw_slave_index(priv);
1541 
1542 	if (!netif_running(dev))
1543 		return -EINVAL;
1544 
1545 	switch (cmd) {
1546 #ifdef CONFIG_TI_CPTS
1547 	case SIOCSHWTSTAMP:
1548 		return cpsw_hwtstamp_set(dev, req);
1549 	case SIOCGHWTSTAMP:
1550 		return cpsw_hwtstamp_get(dev, req);
1551 #endif
1552 	}
1553 
1554 	if (!priv->slaves[slave_no].phy)
1555 		return -EOPNOTSUPP;
1556 	return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
1557 }
1558 
/* ndo_tx_timeout: recover from a stuck TX path by restarting the TX
 * DMA channel with interrupts masked, then re-enabling and issuing EOI
 * so pending interrupt state is cleared.
 */
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_chan_stop(priv->txch);
	cpdma_chan_start(priv->txch);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

}
1575 
/* ndo_set_mac_address: swap the host-port ALE unicast entry from the
 * old MAC to the new one (scoped to the port VLAN in dual-EMAC mode),
 * then propagate the address to the device and each slave's registers.
 * Returns 0, or -EADDRNOTAVAIL for an invalid ethernet address.
 */
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	int flags = 0;
	u16 vid = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Dual-EMAC ucast entries are keyed by the port's reserved VLAN */
	if (priv->data.dual_emac) {
		vid = priv->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
			   flags, vid);
	cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

	return 0;
}
1602 
1603 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler by hand with hardware
 * interrupts masked, then restore them and issue EOI.
 */
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpsw_interrupt(ndev->irq, priv);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

}
1617 #endif
1618 
/* Install the three ALE entries backing one VLAN id: the VLAN itself,
 * a host-port unicast entry for our MAC, and a broadcast mcast entry.
 * On failure the entries added so far are rolled back (goto unwind) and
 * the error is returned; 0 on success.
 */
static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
				unsigned short vid)
{
	int ret;

	ret = cpsw_ale_add_vlan(priv->ale, vid,
				ALE_ALL_PORTS << priv->host_port,
				0, ALE_ALL_PORTS << priv->host_port,
				(ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
				 priv->host_port, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
				 ALE_ALL_PORTS << priv->host_port,
				 ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
			    priv->host_port, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(priv->ale, vid, 0);
	return ret;
}
1650 
1651 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1652 				    __be16 proto, u16 vid)
1653 {
1654 	struct cpsw_priv *priv = netdev_priv(ndev);
1655 
1656 	if (vid == priv->data.default_vlan)
1657 		return 0;
1658 
1659 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1660 	return cpsw_add_vlan_ale_entry(priv, vid);
1661 }
1662 
1663 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1664 				     __be16 proto, u16 vid)
1665 {
1666 	struct cpsw_priv *priv = netdev_priv(ndev);
1667 	int ret;
1668 
1669 	if (vid == priv->data.default_vlan)
1670 		return 0;
1671 
1672 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1673 	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
1674 	if (ret != 0)
1675 		return ret;
1676 
1677 	ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
1678 				 priv->host_port, ALE_VLAN, vid);
1679 	if (ret != 0)
1680 		return ret;
1681 
1682 	return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
1683 				  0, ALE_VLAN, vid);
1684 }
1685 
/* net_device callbacks shared by the switch-mode device and, in
 * dual-EMAC mode, both slave net devices.
 */
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
};
1702 
1703 static int cpsw_get_regs_len(struct net_device *ndev)
1704 {
1705 	struct cpsw_priv *priv = netdev_priv(ndev);
1706 
1707 	return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
1708 }
1709 
1710 static void cpsw_get_regs(struct net_device *ndev,
1711 			  struct ethtool_regs *regs, void *p)
1712 {
1713 	struct cpsw_priv *priv = netdev_priv(ndev);
1714 	u32 *reg = p;
1715 
1716 	/* update CPSW IP version */
1717 	regs->version = priv->version;
1718 
1719 	cpsw_ale_dump(priv->ale, reg);
1720 }
1721 
1722 static void cpsw_get_drvinfo(struct net_device *ndev,
1723 			     struct ethtool_drvinfo *info)
1724 {
1725 	struct cpsw_priv *priv = netdev_priv(ndev);
1726 
1727 	strlcpy(info->driver, "cpsw", sizeof(info->driver));
1728 	strlcpy(info->version, "1.0", sizeof(info->version));
1729 	strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
1730 	info->regdump_len = cpsw_get_regs_len(ndev);
1731 }
1732 
1733 static u32 cpsw_get_msglevel(struct net_device *ndev)
1734 {
1735 	struct cpsw_priv *priv = netdev_priv(ndev);
1736 	return priv->msg_enable;
1737 }
1738 
1739 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
1740 {
1741 	struct cpsw_priv *priv = netdev_priv(ndev);
1742 	priv->msg_enable = value;
1743 }
1744 
/* ethtool -T: advertise timestamping capabilities.  With CPTS built in
 * we report hardware TX/RX stamping and the PTP clock index; otherwise
 * only software timestamping is claimed.  Always returns 0.
 */
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
#ifdef CONFIG_TI_CPTS
	struct cpsw_priv *priv = netdev_priv(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = priv->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	/* Only the PTP v2 event filter is accepted by cpsw_hwtstamp_set() */
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
#else
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
#endif
	return 0;
}
1776 
1777 static int cpsw_get_settings(struct net_device *ndev,
1778 			     struct ethtool_cmd *ecmd)
1779 {
1780 	struct cpsw_priv *priv = netdev_priv(ndev);
1781 	int slave_no = cpsw_slave_index(priv);
1782 
1783 	if (priv->slaves[slave_no].phy)
1784 		return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
1785 	else
1786 		return -EOPNOTSUPP;
1787 }
1788 
1789 static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1790 {
1791 	struct cpsw_priv *priv = netdev_priv(ndev);
1792 	int slave_no = cpsw_slave_index(priv);
1793 
1794 	if (priv->slaves[slave_no].phy)
1795 		return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
1796 	else
1797 		return -EOPNOTSUPP;
1798 }
1799 
1800 static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1801 {
1802 	struct cpsw_priv *priv = netdev_priv(ndev);
1803 	int slave_no = cpsw_slave_index(priv);
1804 
1805 	wol->supported = 0;
1806 	wol->wolopts = 0;
1807 
1808 	if (priv->slaves[slave_no].phy)
1809 		phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
1810 }
1811 
1812 static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1813 {
1814 	struct cpsw_priv *priv = netdev_priv(ndev);
1815 	int slave_no = cpsw_slave_index(priv);
1816 
1817 	if (priv->slaves[slave_no].phy)
1818 		return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
1819 	else
1820 		return -EOPNOTSUPP;
1821 }
1822 
1823 static void cpsw_get_pauseparam(struct net_device *ndev,
1824 				struct ethtool_pauseparam *pause)
1825 {
1826 	struct cpsw_priv *priv = netdev_priv(ndev);
1827 
1828 	pause->autoneg = AUTONEG_DISABLE;
1829 	pause->rx_pause = priv->rx_pause ? true : false;
1830 	pause->tx_pause = priv->tx_pause ? true : false;
1831 }
1832 
1833 static int cpsw_set_pauseparam(struct net_device *ndev,
1834 			       struct ethtool_pauseparam *pause)
1835 {
1836 	struct cpsw_priv *priv = netdev_priv(ndev);
1837 	bool link;
1838 
1839 	priv->rx_pause = pause->rx_pause ? true : false;
1840 	priv->tx_pause = pause->tx_pause ? true : false;
1841 
1842 	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
1843 
1844 	return 0;
1845 }
1846 
/* ethtool operations shared by all CPSW net devices. */
static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= cpsw_get_ts_info,
	.get_settings	= cpsw_get_settings,
	.set_settings	= cpsw_set_settings,
	.get_coalesce	= cpsw_get_coalesce,
	.set_coalesce	= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol	= cpsw_get_wol,
	.set_wol	= cpsw_set_wol,
	.get_regs_len	= cpsw_get_regs_len,
	.get_regs	= cpsw_get_regs,
};
1867 
1868 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1869 			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
1870 {
1871 	void __iomem		*regs = priv->regs;
1872 	int			slave_num = slave->slave_num;
1873 	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;
1874 
1875 	slave->data	= data;
1876 	slave->regs	= regs + slave_reg_ofs;
1877 	slave->sliver	= regs + sliver_reg_ofs;
1878 	slave->port_vlan = data->dual_emac_res_vlan;
1879 }
1880 
1881 #define AM33XX_CTRL_MAC_LO_REG(id) (0x630 + 0x8 * id)
1882 #define AM33XX_CTRL_MAC_HI_REG(id) (0x630 + 0x8 * id + 0x4)
1883 
1884 static int cpsw_am33xx_cm_get_macid(struct device *dev, int slave,
1885 		u8 *mac_addr)
1886 {
1887 	u32 macid_lo;
1888 	u32 macid_hi;
1889 	struct regmap *syscon;
1890 
1891 	syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
1892 	if (IS_ERR(syscon)) {
1893 		if (PTR_ERR(syscon) == -ENODEV)
1894 			return 0;
1895 		return PTR_ERR(syscon);
1896 	}
1897 
1898 	regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(slave), &macid_lo);
1899 	regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(slave), &macid_hi);
1900 
1901 	mac_addr[5] = (macid_lo >> 8) & 0xff;
1902 	mac_addr[4] = macid_lo & 0xff;
1903 	mac_addr[3] = (macid_hi >> 24) & 0xff;
1904 	mac_addr[2] = (macid_hi >> 16) & 0xff;
1905 	mac_addr[1] = (macid_hi >> 8) & 0xff;
1906 	mac_addr[0] = macid_hi & 0xff;
1907 
1908 	return 0;
1909 }
1910 
1911 static int cpsw_probe_dt(struct cpsw_platform_data *data,
1912 			 struct platform_device *pdev)
1913 {
1914 	struct device_node *node = pdev->dev.of_node;
1915 	struct device_node *slave_node;
1916 	int i = 0, ret;
1917 	u32 prop;
1918 
1919 	if (!node)
1920 		return -EINVAL;
1921 
1922 	if (of_property_read_u32(node, "slaves", &prop)) {
1923 		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
1924 		return -EINVAL;
1925 	}
1926 	data->slaves = prop;
1927 
1928 	if (of_property_read_u32(node, "active_slave", &prop)) {
1929 		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
1930 		return -EINVAL;
1931 	}
1932 	data->active_slave = prop;
1933 
1934 	if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1935 		dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
1936 		return -EINVAL;
1937 	}
1938 	data->cpts_clock_mult = prop;
1939 
1940 	if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
1941 		dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
1942 		return -EINVAL;
1943 	}
1944 	data->cpts_clock_shift = prop;
1945 
1946 	data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
1947 					* sizeof(struct cpsw_slave_data),
1948 					GFP_KERNEL);
1949 	if (!data->slave_data)
1950 		return -ENOMEM;
1951 
1952 	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
1953 		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
1954 		return -EINVAL;
1955 	}
1956 	data->channels = prop;
1957 
1958 	if (of_property_read_u32(node, "ale_entries", &prop)) {
1959 		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
1960 		return -EINVAL;
1961 	}
1962 	data->ale_entries = prop;
1963 
1964 	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
1965 		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
1966 		return -EINVAL;
1967 	}
1968 	data->bd_ram_size = prop;
1969 
1970 	if (of_property_read_u32(node, "rx_descs", &prop)) {
1971 		dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
1972 		return -EINVAL;
1973 	}
1974 	data->rx_descs = prop;
1975 
1976 	if (of_property_read_u32(node, "mac_control", &prop)) {
1977 		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
1978 		return -EINVAL;
1979 	}
1980 	data->mac_control = prop;
1981 
1982 	if (of_property_read_bool(node, "dual_emac"))
1983 		data->dual_emac = 1;
1984 
1985 	/*
1986 	 * Populate all the child nodes here...
1987 	 */
1988 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
1989 	/* We do not want to force this, as in some cases may not have child */
1990 	if (ret)
1991 		dev_warn(&pdev->dev, "Doesn't have any child node\n");
1992 
1993 	for_each_child_of_node(node, slave_node) {
1994 		struct cpsw_slave_data *slave_data = data->slave_data + i;
1995 		const void *mac_addr = NULL;
1996 		u32 phyid;
1997 		int lenp;
1998 		const __be32 *parp;
1999 		struct device_node *mdio_node;
2000 		struct platform_device *mdio;
2001 
2002 		/* This is no slave child node, continue */
2003 		if (strcmp(slave_node->name, "slave"))
2004 			continue;
2005 
2006 		parp = of_get_property(slave_node, "phy_id", &lenp);
2007 		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
2008 			dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
2009 			return -EINVAL;
2010 		}
2011 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2012 		phyid = be32_to_cpup(parp+1);
2013 		mdio = of_find_device_by_node(mdio_node);
2014 		of_node_put(mdio_node);
2015 		if (!mdio) {
2016 			dev_err(&pdev->dev, "Missing mdio platform device\n");
2017 			return -EINVAL;
2018 		}
2019 		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2020 			 PHY_ID_FMT, mdio->name, phyid);
2021 
2022 		mac_addr = of_get_mac_address(slave_node);
2023 		if (mac_addr) {
2024 			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
2025 		} else {
2026 			if (of_machine_is_compatible("ti,am33xx")) {
2027 				ret = cpsw_am33xx_cm_get_macid(&pdev->dev, i,
2028 							slave_data->mac_addr);
2029 				if (ret)
2030 					return ret;
2031 			}
2032 		}
2033 
2034 		slave_data->phy_if = of_get_phy_mode(slave_node);
2035 		if (slave_data->phy_if < 0) {
2036 			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2037 				i);
2038 			return slave_data->phy_if;
2039 		}
2040 
2041 		if (data->dual_emac) {
2042 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2043 						 &prop)) {
2044 				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
2045 				slave_data->dual_emac_res_vlan = i+1;
2046 				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
2047 					slave_data->dual_emac_res_vlan, i);
2048 			} else {
2049 				slave_data->dual_emac_res_vlan = prop;
2050 			}
2051 		}
2052 
2053 		i++;
2054 		if (i == data->slaves)
2055 			break;
2056 	}
2057 
2058 	return 0;
2059 }
2060 
2061 static int cpsw_probe_dual_emac(struct platform_device *pdev,
2062 				struct cpsw_priv *priv)
2063 {
2064 	struct cpsw_platform_data	*data = &priv->data;
2065 	struct net_device		*ndev;
2066 	struct cpsw_priv		*priv_sl2;
2067 	int ret = 0, i;
2068 
2069 	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
2070 	if (!ndev) {
2071 		dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
2072 		return -ENOMEM;
2073 	}
2074 
2075 	priv_sl2 = netdev_priv(ndev);
2076 	spin_lock_init(&priv_sl2->lock);
2077 	priv_sl2->data = *data;
2078 	priv_sl2->pdev = pdev;
2079 	priv_sl2->ndev = ndev;
2080 	priv_sl2->dev  = &ndev->dev;
2081 	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2082 	priv_sl2->rx_packet_max = max(rx_packet_max, 128);
2083 
2084 	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
2085 		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
2086 			ETH_ALEN);
2087 		dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
2088 	} else {
2089 		random_ether_addr(priv_sl2->mac_addr);
2090 		dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
2091 	}
2092 	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
2093 
2094 	priv_sl2->slaves = priv->slaves;
2095 	priv_sl2->clk = priv->clk;
2096 
2097 	priv_sl2->coal_intvl = 0;
2098 	priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
2099 
2100 	priv_sl2->regs = priv->regs;
2101 	priv_sl2->host_port = priv->host_port;
2102 	priv_sl2->host_port_regs = priv->host_port_regs;
2103 	priv_sl2->wr_regs = priv->wr_regs;
2104 	priv_sl2->hw_stats = priv->hw_stats;
2105 	priv_sl2->dma = priv->dma;
2106 	priv_sl2->txch = priv->txch;
2107 	priv_sl2->rxch = priv->rxch;
2108 	priv_sl2->ale = priv->ale;
2109 	priv_sl2->emac_port = 1;
2110 	priv->slaves[1].ndev = ndev;
2111 	priv_sl2->cpts = priv->cpts;
2112 	priv_sl2->version = priv->version;
2113 
2114 	for (i = 0; i < priv->num_irqs; i++) {
2115 		priv_sl2->irqs_table[i] = priv->irqs_table[i];
2116 		priv_sl2->num_irqs = priv->num_irqs;
2117 	}
2118 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2119 
2120 	ndev->netdev_ops = &cpsw_netdev_ops;
2121 	ndev->ethtool_ops = &cpsw_ethtool_ops;
2122 	netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
2123 
2124 	/* register the network device */
2125 	SET_NETDEV_DEV(ndev, &pdev->dev);
2126 	ret = register_netdev(ndev);
2127 	if (ret) {
2128 		dev_err(&pdev->dev, "cpsw: error registering net device\n");
2129 		free_netdev(ndev);
2130 		ret = -ENODEV;
2131 	}
2132 
2133 	return ret;
2134 }
2135 
2136 static int cpsw_probe(struct platform_device *pdev)
2137 {
2138 	struct cpsw_platform_data	*data;
2139 	struct net_device		*ndev;
2140 	struct cpsw_priv		*priv;
2141 	struct cpdma_params		dma_params;
2142 	struct cpsw_ale_params		ale_params;
2143 	void __iomem			*ss_regs;
2144 	struct resource			*res, *ss_res;
2145 	u32 slave_offset, sliver_offset, slave_size;
2146 	int ret = 0, i, k = 0;
2147 
2148 	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
2149 	if (!ndev) {
2150 		dev_err(&pdev->dev, "error allocating net_device\n");
2151 		return -ENOMEM;
2152 	}
2153 
2154 	platform_set_drvdata(pdev, ndev);
2155 	priv = netdev_priv(ndev);
2156 	spin_lock_init(&priv->lock);
2157 	priv->pdev = pdev;
2158 	priv->ndev = ndev;
2159 	priv->dev  = &ndev->dev;
2160 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2161 	priv->rx_packet_max = max(rx_packet_max, 128);
2162 	priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
2163 	priv->irq_enabled = true;
2164 	if (!priv->cpts) {
2165 		dev_err(&pdev->dev, "error allocating cpts\n");
2166 		ret = -ENOMEM;
2167 		goto clean_ndev_ret;
2168 	}
2169 
2170 	/*
2171 	 * This may be required here for child devices.
2172 	 */
2173 	pm_runtime_enable(&pdev->dev);
2174 
2175 	/* Select default pin state */
2176 	pinctrl_pm_select_default_state(&pdev->dev);
2177 
2178 	if (cpsw_probe_dt(&priv->data, pdev)) {
2179 		dev_err(&pdev->dev, "cpsw: platform data missing\n");
2180 		ret = -ENODEV;
2181 		goto clean_runtime_disable_ret;
2182 	}
2183 	data = &priv->data;
2184 
2185 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
2186 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2187 		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
2188 	} else {
2189 		eth_random_addr(priv->mac_addr);
2190 		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
2191 	}
2192 
2193 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2194 
2195 	priv->slaves = devm_kzalloc(&pdev->dev,
2196 				    sizeof(struct cpsw_slave) * data->slaves,
2197 				    GFP_KERNEL);
2198 	if (!priv->slaves) {
2199 		ret = -ENOMEM;
2200 		goto clean_runtime_disable_ret;
2201 	}
2202 	for (i = 0; i < data->slaves; i++)
2203 		priv->slaves[i].slave_num = i;
2204 
2205 	priv->slaves[0].ndev = ndev;
2206 	priv->emac_port = 0;
2207 
2208 	priv->clk = devm_clk_get(&pdev->dev, "fck");
2209 	if (IS_ERR(priv->clk)) {
2210 		dev_err(priv->dev, "fck is not found\n");
2211 		ret = -ENODEV;
2212 		goto clean_runtime_disable_ret;
2213 	}
2214 	priv->coal_intvl = 0;
2215 	priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
2216 
2217 	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2218 	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
2219 	if (IS_ERR(ss_regs)) {
2220 		ret = PTR_ERR(ss_regs);
2221 		goto clean_runtime_disable_ret;
2222 	}
2223 	priv->regs = ss_regs;
2224 	priv->host_port = HOST_PORT_NUM;
2225 
2226 	/* Need to enable clocks with runtime PM api to access module
2227 	 * registers
2228 	 */
2229 	pm_runtime_get_sync(&pdev->dev);
2230 	priv->version = readl(&priv->regs->id_ver);
2231 	pm_runtime_put_sync(&pdev->dev);
2232 
2233 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2234 	priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
2235 	if (IS_ERR(priv->wr_regs)) {
2236 		ret = PTR_ERR(priv->wr_regs);
2237 		goto clean_runtime_disable_ret;
2238 	}
2239 
2240 	memset(&dma_params, 0, sizeof(dma_params));
2241 	memset(&ale_params, 0, sizeof(ale_params));
2242 
2243 	switch (priv->version) {
2244 	case CPSW_VERSION_1:
2245 		priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
2246 		priv->cpts->reg      = ss_regs + CPSW1_CPTS_OFFSET;
2247 		priv->hw_stats	     = ss_regs + CPSW1_HW_STATS;
2248 		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
2249 		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
2250 		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
2251 		slave_offset         = CPSW1_SLAVE_OFFSET;
2252 		slave_size           = CPSW1_SLAVE_SIZE;
2253 		sliver_offset        = CPSW1_SLIVER_OFFSET;
2254 		dma_params.desc_mem_phys = 0;
2255 		break;
2256 	case CPSW_VERSION_2:
2257 	case CPSW_VERSION_3:
2258 	case CPSW_VERSION_4:
2259 		priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
2260 		priv->cpts->reg      = ss_regs + CPSW2_CPTS_OFFSET;
2261 		priv->hw_stats	     = ss_regs + CPSW2_HW_STATS;
2262 		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
2263 		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
2264 		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
2265 		slave_offset         = CPSW2_SLAVE_OFFSET;
2266 		slave_size           = CPSW2_SLAVE_SIZE;
2267 		sliver_offset        = CPSW2_SLIVER_OFFSET;
2268 		dma_params.desc_mem_phys =
2269 			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
2270 		break;
2271 	default:
2272 		dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
2273 		ret = -ENODEV;
2274 		goto clean_runtime_disable_ret;
2275 	}
2276 	for (i = 0; i < priv->data.slaves; i++) {
2277 		struct cpsw_slave *slave = &priv->slaves[i];
2278 		cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
2279 		slave_offset  += slave_size;
2280 		sliver_offset += SLIVER_SIZE;
2281 	}
2282 
2283 	dma_params.dev		= &pdev->dev;
2284 	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
2285 	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
2286 	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
2287 	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
2288 	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;
2289 
2290 	dma_params.num_chan		= data->channels;
2291 	dma_params.has_soft_reset	= true;
2292 	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
2293 	dma_params.desc_mem_size	= data->bd_ram_size;
2294 	dma_params.desc_align		= 16;
2295 	dma_params.has_ext_regs		= true;
2296 	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
2297 
2298 	priv->dma = cpdma_ctlr_create(&dma_params);
2299 	if (!priv->dma) {
2300 		dev_err(priv->dev, "error initializing dma\n");
2301 		ret = -ENOMEM;
2302 		goto clean_runtime_disable_ret;
2303 	}
2304 
2305 	priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
2306 				       cpsw_tx_handler);
2307 	priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
2308 				       cpsw_rx_handler);
2309 
2310 	if (WARN_ON(!priv->txch || !priv->rxch)) {
2311 		dev_err(priv->dev, "error initializing dma channels\n");
2312 		ret = -ENOMEM;
2313 		goto clean_dma_ret;
2314 	}
2315 
2316 	ale_params.dev			= &ndev->dev;
2317 	ale_params.ale_ageout		= ale_ageout;
2318 	ale_params.ale_entries		= data->ale_entries;
2319 	ale_params.ale_ports		= data->slaves;
2320 
2321 	priv->ale = cpsw_ale_create(&ale_params);
2322 	if (!priv->ale) {
2323 		dev_err(priv->dev, "error initializing ale engine\n");
2324 		ret = -ENODEV;
2325 		goto clean_dma_ret;
2326 	}
2327 
2328 	ndev->irq = platform_get_irq(pdev, 0);
2329 	if (ndev->irq < 0) {
2330 		dev_err(priv->dev, "error getting irq resource\n");
2331 		ret = -ENOENT;
2332 		goto clean_ale_ret;
2333 	}
2334 
2335 	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2336 		if (k >= ARRAY_SIZE(priv->irqs_table)) {
2337 			ret = -EINVAL;
2338 			goto clean_ale_ret;
2339 		}
2340 
2341 		ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt,
2342 				       0, dev_name(&pdev->dev), priv);
2343 		if (ret < 0) {
2344 			dev_err(priv->dev, "error attaching irq (%d)\n", ret);
2345 			goto clean_ale_ret;
2346 		}
2347 
2348 		priv->irqs_table[k] = res->start;
2349 		k++;
2350 	}
2351 
2352 	priv->num_irqs = k;
2353 
2354 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2355 
2356 	ndev->netdev_ops = &cpsw_netdev_ops;
2357 	ndev->ethtool_ops = &cpsw_ethtool_ops;
2358 	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
2359 
2360 	/* register the network device */
2361 	SET_NETDEV_DEV(ndev, &pdev->dev);
2362 	ret = register_netdev(ndev);
2363 	if (ret) {
2364 		dev_err(priv->dev, "error registering net device\n");
2365 		ret = -ENODEV;
2366 		goto clean_ale_ret;
2367 	}
2368 
2369 	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
2370 		    &ss_res->start, ndev->irq);
2371 
2372 	if (priv->data.dual_emac) {
2373 		ret = cpsw_probe_dual_emac(pdev, priv);
2374 		if (ret) {
2375 			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
2376 			goto clean_ale_ret;
2377 		}
2378 	}
2379 
2380 	return 0;
2381 
2382 clean_ale_ret:
2383 	cpsw_ale_destroy(priv->ale);
2384 clean_dma_ret:
2385 	cpdma_chan_destroy(priv->txch);
2386 	cpdma_chan_destroy(priv->rxch);
2387 	cpdma_ctlr_destroy(priv->dma);
2388 clean_runtime_disable_ret:
2389 	pm_runtime_disable(&pdev->dev);
2390 clean_ndev_ret:
2391 	free_netdev(priv->ndev);
2392 	return ret;
2393 }
2394 
/* device_for_each_child() callback: unregister a DT-populated child.
 * The void *c cookie is unused.
 */
static int cpsw_remove_child_device(struct device *dev, void *c)
{
	of_device_unregister(to_platform_device(dev));

	return 0;
}
2403 
2404 static int cpsw_remove(struct platform_device *pdev)
2405 {
2406 	struct net_device *ndev = platform_get_drvdata(pdev);
2407 	struct cpsw_priv *priv = netdev_priv(ndev);
2408 
2409 	if (priv->data.dual_emac)
2410 		unregister_netdev(cpsw_get_slave_ndev(priv, 1));
2411 	unregister_netdev(ndev);
2412 
2413 	cpsw_ale_destroy(priv->ale);
2414 	cpdma_chan_destroy(priv->txch);
2415 	cpdma_chan_destroy(priv->rxch);
2416 	cpdma_ctlr_destroy(priv->dma);
2417 	pm_runtime_disable(&pdev->dev);
2418 	device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
2419 	if (priv->data.dual_emac)
2420 		free_netdev(cpsw_get_slave_ndev(priv, 1));
2421 	free_netdev(ndev);
2422 	return 0;
2423 }
2424 
2425 static int cpsw_suspend(struct device *dev)
2426 {
2427 	struct platform_device	*pdev = to_platform_device(dev);
2428 	struct net_device	*ndev = platform_get_drvdata(pdev);
2429 	struct cpsw_priv	*priv = netdev_priv(ndev);
2430 
2431 	if (priv->data.dual_emac) {
2432 		int i;
2433 
2434 		for (i = 0; i < priv->data.slaves; i++) {
2435 			if (netif_running(priv->slaves[i].ndev))
2436 				cpsw_ndo_stop(priv->slaves[i].ndev);
2437 			soft_reset_slave(priv->slaves + i);
2438 		}
2439 	} else {
2440 		if (netif_running(ndev))
2441 			cpsw_ndo_stop(ndev);
2442 		for_each_slave(priv, soft_reset_slave);
2443 	}
2444 
2445 	pm_runtime_put_sync(&pdev->dev);
2446 
2447 	/* Select sleep pin state */
2448 	pinctrl_pm_select_sleep_state(&pdev->dev);
2449 
2450 	return 0;
2451 }
2452 
2453 static int cpsw_resume(struct device *dev)
2454 {
2455 	struct platform_device	*pdev = to_platform_device(dev);
2456 	struct net_device	*ndev = platform_get_drvdata(pdev);
2457 	struct cpsw_priv	*priv = netdev_priv(ndev);
2458 
2459 	pm_runtime_get_sync(&pdev->dev);
2460 
2461 	/* Select default pin state */
2462 	pinctrl_pm_select_default_state(&pdev->dev);
2463 
2464 	if (priv->data.dual_emac) {
2465 		int i;
2466 
2467 		for (i = 0; i < priv->data.slaves; i++) {
2468 			if (netif_running(priv->slaves[i].ndev))
2469 				cpsw_ndo_open(priv->slaves[i].ndev);
2470 		}
2471 	} else {
2472 		if (netif_running(ndev))
2473 			cpsw_ndo_open(ndev);
2474 	}
2475 	return 0;
2476 }
2477 
/* System sleep callbacks; runtime PM references are managed directly in
 * probe/suspend/resume rather than via runtime PM ops.
 */
static const struct dev_pm_ops cpsw_pm_ops = {
	.suspend	= cpsw_suspend,
	.resume		= cpsw_resume,
};
2482 
/* Device-tree match table: binds this driver to "ti,cpsw" nodes */
static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
2488 
/* Platform driver glue: probe/remove plus PM ops and the OF match table */
static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};
2498 
/* Module init: register the platform driver.  Uses late_initcall rather
 * than module_init — presumably so that drivers this device depends on
 * (e.g. the MDIO bus) are registered first; NOTE(review): confirm.
 */
static int __init cpsw_init(void)
{
	return platform_driver_register(&cpsw_driver);
}
late_initcall(cpsw_init);
2504 
/* Module unload: unregister the platform driver */
static void __exit cpsw_exit(void)
{
	platform_driver_unregister(&cpsw_driver);
}
module_exit(cpsw_exit);
2510 
2511 MODULE_LICENSE("GPL");
2512 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
2513 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
2514 MODULE_DESCRIPTION("TI CPSW Ethernet driver");
2515