/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define ALE_ALL_PORTS		0x7

#define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
#define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c
#define CPSW_VERSION_3		0x19010f
#define CPSW_VERSION_4		0x190112

#define HOST_PORT_NUM		0
#define CPSW_ALE_PORTS_NUM	3
#define SLIVER_SIZE		0x40

#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
#define CPSW1_HW_STATS		0x400
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
#define CPSW2_HW_STATS		0x900
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define CPSW_POLL_WEIGHT	64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE		4
#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
				 ETH_FCS_LEN +\
				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_RX_VLAN_ENCAP	BIT(2)
#define CPSW_ALE_VLAN_AWARE	1

#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)

#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)

#define cpsw_slave_index(cpsw, priv)				\
		((cpsw->data.dual_emac) ? priv->emac_port :	\
		cpsw->data.active_slave)
#define IRQ_NUM			2
#define CPSW_MAX_QUEUES		8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_FIFO_QUEUE_TYPE_SHIFT	16
#define CPSW_FIFO_SHAPE_EN_SHIFT	16
#define CPSW_FIFO_RATE_EN_SHIFT		20
#define CPSW_TC_NUM			4
#define CPSW_FIFO_SHAPERS_NUM		(CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK			0x7f

#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT	29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK		GENMASK(2, 0)
#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT	16
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT	8
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK	GENMASK(1, 0)
enum {
	CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
};

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
	u32	mem_allign1[8];
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;
	u32	tx_imax;
};

struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};

/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */

#define CTRL_V2_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)


#define CTRL_V3_TS_BITS \
	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
	 TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

#define CPSW_MAX_BLKS_TX		15
#define CPSW_MAX_BLKS_TX_SHIFT		4
#define CPSW_MAX_BLKS_RX		5

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_slave_data {
	struct device_node *phy_node;
	char		phy_id[MII_BUS_ID_SIZE];
	int		phy_if;
	u8		mac_addr[ETH_ALEN];
	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
};

struct cpsw_platform_data {
	struct cpsw_slave_data	*slave_data;
	u32	ss_reg_ofs;	/* Subsystem control register offset */
	u32	channels;	/* number of cpdma channels (symmetric) */
	u32	slaves;		/* number of slave cpgmac ports */
	u32	active_slave;	/* time stamping, ethtool and SIOCGMIIPHY slave */
	u32	ale_entries;	/* ale table size */
	u32	bd_ram_size;	/* buffer descriptor ram size */
	u32	mac_control;	/* Mac control register */
	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
	bool	dual_emac;	/* Enable Dual EMAC mode */
};

struct cpsw_slave {
	void __iomem			*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;
};

static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return readl_relaxed(slave->regs + offset);
}

static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	writel_relaxed(val, slave->regs + offset);
}

struct cpsw_vector {
	struct cpdma_chan *ch;
	int budget;
};

struct cpsw_common {
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct napi_struct		napi_rx;
	struct napi_struct		napi_tx;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	u8 __iomem			*hw_stats;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				version;
	u32				coal_intvl;
	u32				bus_freq_mhz;
	int				rx_packet_max;
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpsw_vector		txv[CPSW_MAX_QUEUES];
	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
	struct cpsw_ale			*ale;
	bool				quirk_irq;
	bool				rx_irq_disabled;
	bool				tx_irq_disabled;
	u32				irqs_table[IRQ_NUM];
	struct cpts			*cpts;
	int				rx_ch_num, tx_ch_num;
	int				speed;
	int				usage_count;
};

struct cpsw_priv {
	struct net_device		*ndev;
	struct device			*dev;
	u32				msg_enable;
	u8				mac_addr[ETH_ALEN];
	bool				rx_pause;
	bool				tx_pause;
	bool				mqprio_hw;
	int				fifo_bw[CPSW_TC_NUM];
	int				shp_cfg_speed;
	int				tx_ts_enabled;
	int				rx_ts_enabled;
	u32				emac_port;
	struct cpsw_common		*cpsw;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS,				\
				FIELD_SIZEOF(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
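/* Note: the 'napi' macro parameter below is substituted into the member name
 * as well, so this macro only works when invoked with an argument literally
 * named napi_rx or napi_tx, matching the cpsw_common members above.
 */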
#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

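/* CPSW port numbering: the host (CPU) port is port 0, the slave (external)
 * ports are numbered 1..n.
 */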
static inline int cpsw_get_slave_port(u32 slave_num)
{
	return slave_num + 1;
}

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface is common
		 * for both interfaces, as both interfaces share the same
		 * hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and
			 * slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and
			 * slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

struct addr_sync_ctx {
	struct net_device *ndev;
	const u8 *addr;		/* address to be synched */
	int consumed;		/* number of address instances */
	int flush;		/* flush flag */
};

/**
 * cpsw_set_mc - add a multicast entry to the ALE table, or delete it
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

static void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue	*txq;
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma; if so,
	 * wake the queue, since we now have free descriptors for tx.
	 */
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

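/* Layout of the RX VLAN encapsulation word that the hardware prepends to
 * each received packet (per the CPSW_RX_VLAN_ENCAP_HDR_* definitions above):
 * bits 31:29 hold the VLAN priority, bits 27:16 the VLAN id and bits 9:8 the
 * packet type (VLAN-tagged, reserved, priority-tagged or untagged).
 */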
static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan	*ch;
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	int			ret = 0, port;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv	*priv;

	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for an interface which
			 * is already down while the other interface is up
			 * and running. Instead of freeing the skb, which
			 * would reduce the number of rx descriptors in the
			 * DMA engine, requeue it back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
		skb_put(skb, len);
		if (status & CPDMA_RX_VLAN_ENCAP)
			cpsw_rx_vlan_encap(skb);
		priv = netdev_priv(ndev);
		if (priv->rx_ts_enabled)
			cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		return;
	}

	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}

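/* Split the NAPI budget (CPSW_POLL_WEIGHT) between the channels: tx channels
 * with a rate limit get a budget proportional to their rate relative to the
 * potential maximum rate, the remaining budget is shared equally between the
 * unlimited channels, and any leftover goes to the channel with the biggest
 * rate. The rx budget is simply shared equally between the rx channels.
 */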
static void cpsw_split_res(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to the next potential max
		 * speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

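/* On SoCs for which the driver sets quirk_irq, the interrupt line must stay
 * masked until the corresponding NAPI poll completes: the handlers below
 * disable the irq and record that in {rx,tx}_irq_disabled, and
 * cpsw_rx_poll()/cpsw_tx_poll() re-enable it once the budget is no longer
 * exhausted.
 */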
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

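/* The PTYPE register holds a 3-bit FIFO shaper enable field per slave port,
 * starting at CPSW_FIFO_SHAPE_EN_SHIFT; the helpers below test and toggle
 * the per-FIFO shaper bits for the slave belonging to this netdev.
 */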
static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = cpsw->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN	*/
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);

		if (priv->tx_pause)
			mac_control |= BIT(4);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		writel_relaxed(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	struct cpsw_common	*cpsw = priv->cpsw;
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(ndev);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static int cpsw_get_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}

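/* The interrupt pacer counts in 4us pulses: prescale (bus_freq_mhz * 4) is
 * the number of input clocks per pulse, and rx_imax/tx_imax hold the number
 * of interrupts allowed per millisecond. For intervals above
 * CPSW_CMINTMAX_INTVL the 4us pulse itself is dilated by an additional
 * divider (addnl_dvdr) folded into the prescaler.
 */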
static int cpsw_set_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}

static int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	int ch_stats_len;
	int line;
	int i;

	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
	for (i = 0; i < ch_stats_len; i++) {
		line = i % CPSW_STATS_CH_LEN;
		snprintf(*p, ETH_GSTRING_LEN,
			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
			 (long)(i / CPSW_STATS_CH_LEN),
			 cpsw_gstrings_ch_stats[line].stat_string);
		*p += ETH_GSTRING_LEN;
	}
}

static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}

static void cpsw_get_ethtool_stats(struct net_device *ndev,
				    struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* Collect the CPSW hardware statistics */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}

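/* In dual EMAC mode the skb is submitted as a directed packet to port
 * (emac_port + 1), forcing the egress port; otherwise a directed port of 0
 * lets the switch choose the egress port via the ALE.
 */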
static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
					struct sk_buff *skb,
					struct cpdma_chan *txch)
{
	struct cpsw_common *cpsw = priv->cpsw;

	skb_tx_timestamp(skb);
	return cpdma_chan_submit(txch, skb, skb->data, skb->len,
				 priv->emac_port + cpsw->data.dual_emac);
}

static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}

static void soft_reset_slave(struct cpsw_slave *slave)
{
	char name[32];

	snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
	soft_reset(name, &slave->sliver->soft_reset);
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	soft_reset_slave(slave);

	/* setup priority mapping */
	writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}

static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct sk_buff *skb;
	int ch_buf_num;
	int ch, i, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
							  cpsw->rx_packet_max,
							  GFP_KERNEL);
			if (!skb) {
				cpsw_err(priv, ifup, "cannot allocate skb\n");
				return -ENOMEM;
			}

			skb_set_queue_mapping(skb, ch);
			ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
						skb->data, skb_tailroom(skb),
						0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit skb to channel %d rx, error %d\n",
					 ch, ret);
				kfree_skb(skb);
				return ret;
			}
			kmemleak_not_leak(skb);
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	soft_reset_slave(slave);
}

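/* Map a traffic class to a tx FIFO: the last (lowest priority) tc uses
 * FIFO0, which cannot be shaped, while the remaining classes map to the
 * shaper FIFOs in reverse order (tc0 -> FIFO3, tc1 -> FIFO2, ... with
 * CPSW_TC_NUM == 4).
 */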
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}

1755 static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
1756 {
1757 	struct cpsw_common *cpsw = priv->cpsw;
1758 	u32 val = 0, send_pct, shift;
1759 	struct cpsw_slave *slave;
1760 	int pct = 0, i;
1761 
1762 	if (bw > priv->shp_cfg_speed * 1000)
1763 		goto err;
1764 
1765 	/* shaping has to stay enabled for highest fifos linearly
1766 	 * and fifo bw no more then interface can allow
1767 	 */
1768 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1769 	send_pct = slave_read(slave, SEND_PERCENT);
1770 	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
1771 		if (!bw) {
1772 			if (i >= fifo || !priv->fifo_bw[i])
1773 				continue;
1774 
1775 			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
1776 			continue;
1777 		}
1778 
1779 		if (!priv->fifo_bw[i] && i > fifo) {
1780 			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
1781 			return -EINVAL;
1782 		}
1783 
1784 		shift = (i - 1) * 8;
1785 		if (i == fifo) {
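			/* bw (the CBS idleslope) is in kbit/s and
			 * shp_cfg_speed in Mbit/s, so the slice of the
			 * interface rate is bw / (speed * 1000) * 100
			 * percent, i.e. DIV_ROUND_UP(bw, speed * 10);
			 * e.g. 25000 kbit/s on a 100 Mbit/s link is 25
			 * percent.
			 */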
1786 			send_pct &= ~(CPSW_PCT_MASK << shift);
1787 			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
1788 			if (!val)
1789 				val = 1;
1790 
1791 			send_pct |= val << shift;
1792 			pct += val;
1793 			continue;
1794 		}
1795 
1796 		if (priv->fifo_bw[i])
1797 			pct += (send_pct >> shift) & CPSW_PCT_MASK;
1798 	}
1799 
1800 	if (pct >= 100)
1801 		goto err;
1802 
1803 	slave_write(slave, send_pct, SEND_PERCENT);
1804 	priv->fifo_bw[fifo] = bw;
1805 
1806 	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
1807 		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
1808 
1809 	return 0;
1810 err:
1811 	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
1812 	return -EINVAL;
1813 }
1814 
1815 static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
1816 {
1817 	struct cpsw_common *cpsw = priv->cpsw;
1818 	struct cpsw_slave *slave;
1819 	u32 tx_in_ctl_rg, val;
1820 	int ret;
1821 
1822 	ret = cpsw_set_fifo_bw(priv, fifo, bw);
1823 	if (ret)
1824 		return ret;
1825 
1826 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1827 	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
1828 		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
1829 
1830 	if (!bw)
1831 		cpsw_fifo_shp_on(priv, fifo, bw);
1832 
1833 	val = slave_read(slave, tx_in_ctl_rg);
1834 	if (cpsw_shp_is_off(priv)) {
1835 		/* disable the FIFO rate limited queues */
1836 		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
1837 
1838 		/* set type of FIFO queues to normal priority mode */
1839 		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
1840 
1841 		/* set type of FIFO queues to be rate limited */
1842 		if (bw)
1843 			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
1844 		else
1845 			priv->shp_cfg_speed = 0;
1846 	}
1847 
1848 	/* toggle a FIFO rate limited queue */
1849 	if (bw)
1850 		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1851 	else
1852 		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1853 	slave_write(slave, val, tx_in_ctl_rg);
1854 
1855 	/* FIFO transmit shape enable */
1856 	cpsw_fifo_shp_on(priv, fifo, bw);
1857 	return 0;
1858 }
1859 
1860 /* Defaults:
1861  * class A - prio 3
1862  * class B - prio 2
1863  * shaping for class A should be set first
1864  */
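/* A minimal usage sketch, assuming an mqprio setup is already in place;
 * the device name, handle and credit/slope values are illustrative:
 *
 *   tc qdisc replace dev eth0 parent 100:2 cbs locredit -1470 \
 *      hicredit 30 sendslope -98000 idleslope 2000 offload 1
 *
 * With "offload 1" the stack calls ndo_setup_tc(TC_SETUP_QDISC_CBS),
 * which is handled below.
 */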
1865 static int cpsw_set_cbs(struct net_device *ndev,
1866 			struct tc_cbs_qopt_offload *qopt)
1867 {
1868 	struct cpsw_priv *priv = netdev_priv(ndev);
1869 	struct cpsw_common *cpsw = priv->cpsw;
1870 	struct cpsw_slave *slave;
1871 	int prev_speed = 0;
1872 	int tc, ret, fifo;
1873 	u32 bw = 0;
1874 
1875 	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
1876 
1877 	/* enable channels in backward order: the highest FIFOs must be rate
1878 	 * limited first, matching CPDMA rate limited channels, which are
1879 	 * also used in backward order. FIFO0 cannot be rate limited.
1880 	 */
1881 	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
1882 	if (!fifo) {
1883 		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
1884 		return -EINVAL;
1885 	}
1886 
1887 	/* do nothing, it's disabled anyway */
1888 	if (!qopt->enable && !priv->fifo_bw[fifo])
1889 		return 0;
1890 
1891 	/* shapers can be set if link speed is known */
1892 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1893 	if (slave->phy && slave->phy->link) {
1894 		if (priv->shp_cfg_speed &&
1895 		    priv->shp_cfg_speed != slave->phy->speed)
1896 			prev_speed = priv->shp_cfg_speed;
1897 
1898 		priv->shp_cfg_speed = slave->phy->speed;
1899 	}
1900 
1901 	if (!priv->shp_cfg_speed) {
1902 		dev_err(priv->dev, "Link speed is not known");
1903 		return -EINVAL;
1904 	}
1905 
1906 	ret = pm_runtime_get_sync(cpsw->dev);
1907 	if (ret < 0) {
1908 		pm_runtime_put_noidle(cpsw->dev);
1909 		return ret;
1910 	}
1911 
1912 	bw = qopt->enable ? qopt->idleslope : 0;
1913 	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
1914 	if (ret) {
1915 		priv->shp_cfg_speed = prev_speed;
1916 		prev_speed = 0;
1917 	}
1918 
1919 	if (bw && prev_speed)
1920 		dev_warn(priv->dev,
1921 			 "Link speed was changed, so the CBS shaper rates have changed!");
1922 
1923 	pm_runtime_put_sync(cpsw->dev);
1924 	return ret;
1925 }
1926 
1927 static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1928 {
1929 	int fifo, bw;
1930 
1931 	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1932 		bw = priv->fifo_bw[fifo];
1933 		if (!bw)
1934 			continue;
1935 
1936 		cpsw_set_fifo_rlimit(priv, fifo, bw);
1937 	}
1938 }
1939 
1940 static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1941 {
1942 	struct cpsw_common *cpsw = priv->cpsw;
1943 	u32 tx_prio_map = 0;
1944 	int i, tc, fifo;
1945 	u32 tx_prio_rg;
1946 
1947 	if (!priv->mqprio_hw)
1948 		return;
1949 
1950 	for (i = 0; i < 8; i++) {
1951 		tc = netdev_get_prio_tc_map(priv->ndev, i);
1952 		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1953 		tx_prio_map |= fifo << (4 * i);
1954 	}
1955 
1956 	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1957 		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1958 
1959 	slave_write(slave, tx_prio_map, tx_prio_rg);
1960 }
1961 
1962 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
1963 {
1964 	struct cpsw_priv *priv = arg;
1965 
1966 	if (!vdev)
1967 		return 0;
1968 
1969 	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
1970 	return 0;
1971 }
1972 
1973 /* restore resources after port reset */
1974 static void cpsw_restore(struct cpsw_priv *priv)
1975 {
1976 	/* restore vlan configurations */
1977 	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
1978 
1979 	/* restore MQPRIO offload */
1980 	for_each_slave(priv, cpsw_mqprio_resume, priv);
1981 
1982 	/* restore CBS offload */
1983 	for_each_slave(priv, cpsw_cbs_resume, priv);
1984 }
1985 
1986 static int cpsw_ndo_open(struct net_device *ndev)
1987 {
1988 	struct cpsw_priv *priv = netdev_priv(ndev);
1989 	struct cpsw_common *cpsw = priv->cpsw;
1990 	int ret;
1991 	u32 reg;
1992 
1993 	ret = pm_runtime_get_sync(cpsw->dev);
1994 	if (ret < 0) {
1995 		pm_runtime_put_noidle(cpsw->dev);
1996 		return ret;
1997 	}
1998 
1999 	netif_carrier_off(ndev);
2000 
2001 	/* Notify the stack of the actual queue counts. */
2002 	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
2003 	if (ret) {
2004 		dev_err(priv->dev, "cannot set real number of tx queues\n");
2005 		goto err_cleanup;
2006 	}
2007 
2008 	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
2009 	if (ret) {
2010 		dev_err(priv->dev, "cannot set real number of rx queues\n");
2011 		goto err_cleanup;
2012 	}
2013 
2014 	reg = cpsw->version;
2015 
2016 	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
2017 		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
2018 		 CPSW_RTL_VERSION(reg));
2019 
2020 	/* Initialize host and slave ports */
2021 	if (!cpsw->usage_count)
2022 		cpsw_init_host_port(priv);
2023 	for_each_slave(priv, cpsw_slave_open, priv);
2024 
2025 	/* Add default VLAN */
2026 	if (!cpsw->data.dual_emac)
2027 		cpsw_add_default_vlan(priv);
2028 	else
2029 		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
2030 				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
2031 
2032 	/* initialize shared resources for every ndev */
2033 	if (!cpsw->usage_count) {
2034 		/* disable priority elevation */
2035 		writel_relaxed(0, &cpsw->regs->ptype);
2036 
2037 		/* enable statistics collection on all ports */
2038 		writel_relaxed(0x7, &cpsw->regs->stat_port_en);
2039 
2040 		/* Enable internal fifo flow control */
2041 		writel(0x7, &cpsw->regs->flow_control);
2042 
2043 		napi_enable(&cpsw->napi_rx);
2044 		napi_enable(&cpsw->napi_tx);
2045 
2046 		if (cpsw->tx_irq_disabled) {
2047 			cpsw->tx_irq_disabled = false;
2048 			enable_irq(cpsw->irqs_table[1]);
2049 		}
2050 
2051 		if (cpsw->rx_irq_disabled) {
2052 			cpsw->rx_irq_disabled = false;
2053 			enable_irq(cpsw->irqs_table[0]);
2054 		}
2055 
2056 		ret = cpsw_fill_rx_channels(priv);
2057 		if (ret < 0)
2058 			goto err_cleanup;
2059 
2060 		if (cpts_register(cpsw->cpts))
2061 			dev_err(priv->dev, "error registering cpts device\n");
2062 
2063 	}
2064 
2065 	cpsw_restore(priv);
2066 
2067 	/* Enable Interrupt pacing if configured */
2068 	if (cpsw->coal_intvl != 0) {
2069 		struct ethtool_coalesce coal;
2070 
2071 		coal.rx_coalesce_usecs = cpsw->coal_intvl;
2072 		cpsw_set_coalesce(ndev, &coal);
2073 	}
2074 
2075 	cpdma_ctlr_start(cpsw->dma);
2076 	cpsw_intr_enable(cpsw);
2077 	cpsw->usage_count++;
2078 
2079 	return 0;
2080 
2081 err_cleanup:
2082 	cpdma_ctlr_stop(cpsw->dma);
2083 	for_each_slave(priv, cpsw_slave_stop, cpsw);
2084 	pm_runtime_put_sync(cpsw->dev);
2085 	netif_carrier_off(priv->ndev);
2086 	return ret;
2087 }
2088 
2089 static int cpsw_ndo_stop(struct net_device *ndev)
2090 {
2091 	struct cpsw_priv *priv = netdev_priv(ndev);
2092 	struct cpsw_common *cpsw = priv->cpsw;
2093 
2094 	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
2095 	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
2096 	netif_tx_stop_all_queues(priv->ndev);
2097 	netif_carrier_off(priv->ndev);
2098 
2099 	if (cpsw->usage_count <= 1) {
2100 		napi_disable(&cpsw->napi_rx);
2101 		napi_disable(&cpsw->napi_tx);
2102 		cpts_unregister(cpsw->cpts);
2103 		cpsw_intr_disable(cpsw);
2104 		cpdma_ctlr_stop(cpsw->dma);
2105 		cpsw_ale_stop(cpsw->ale);
2106 	}
2107 	for_each_slave(priv, cpsw_slave_stop, cpsw);
2108 
2109 	if (cpsw_need_resplit(cpsw))
2110 		cpsw_split_res(ndev);
2111 
2112 	cpsw->usage_count--;
2113 	pm_runtime_put_sync(cpsw->dev);
2114 	return 0;
2115 }
2116 
2117 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
2118 				       struct net_device *ndev)
2119 {
2120 	struct cpsw_priv *priv = netdev_priv(ndev);
2121 	struct cpsw_common *cpsw = priv->cpsw;
2122 	struct cpts *cpts = cpsw->cpts;
2123 	struct netdev_queue *txq;
2124 	struct cpdma_chan *txch;
2125 	int ret, q_idx;
2126 
2127 	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
2128 		cpsw_err(priv, tx_err, "packet pad failed\n");
2129 		ndev->stats.tx_dropped++;
2130 		return NET_XMIT_DROP;
2131 	}
2132 
2133 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2134 	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
2135 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2136 
2137 	q_idx = skb_get_queue_mapping(skb);
2138 	if (q_idx >= cpsw->tx_ch_num)
2139 		q_idx = q_idx % cpsw->tx_ch_num;
2140 
2141 	txch = cpsw->txv[q_idx].ch;
2142 	txq = netdev_get_tx_queue(ndev, q_idx);
2143 	ret = cpsw_tx_packet_submit(priv, skb, txch);
2144 	if (unlikely(ret != 0)) {
2145 		cpsw_err(priv, tx_err, "desc submit failed\n");
2146 		goto fail;
2147 	}
2148 
2149 	/* If there are no more free tx descriptors, tell the kernel
2150 	 * to stop sending us tx frames.
2151 	 */
2152 	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
2153 		netif_tx_stop_queue(txq);
2154 
2155 		/* Barrier, so that stop_queue is visible to other CPUs */
2156 		smp_mb__after_atomic();
2157 
2158 		if (cpdma_check_free_tx_desc(txch))
2159 			netif_tx_wake_queue(txq);
2160 	}
2161 
2162 	return NETDEV_TX_OK;
2163 fail:
2164 	ndev->stats.tx_dropped++;
2165 	netif_tx_stop_queue(txq);
2166 
2167 	/* Barrier, so that stop_queue is visible to other CPUs */
2168 	smp_mb__after_atomic();
2169 
2170 	if (cpdma_check_free_tx_desc(txch))
2171 		netif_tx_wake_queue(txq);
2172 
2173 	return NETDEV_TX_BUSY;
2174 }
2175 
2176 #if IS_ENABLED(CONFIG_TI_CPTS)
2177 
2178 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
2179 {
2180 	struct cpsw_common *cpsw = priv->cpsw;
2181 	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
2182 	u32 ts_en, seq_id;
2183 
2184 	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
2185 		slave_write(slave, 0, CPSW1_TS_CTL);
2186 		return;
2187 	}
2188 
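	/* Match PTP event messages: sequenceId sits at byte offset 30 of
	 * the 1588 message and ETH_P_1588 is the LTYPE to match (offset
	 * per the IEEE 1588 header layout).
	 */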
2189 	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2190 	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
2191 
2192 	if (priv->tx_ts_enabled)
2193 		ts_en |= CPSW_V1_TS_TX_EN;
2194 
2195 	if (priv->rx_ts_enabled)
2196 		ts_en |= CPSW_V1_TS_RX_EN;
2197 
2198 	slave_write(slave, ts_en, CPSW1_TS_CTL);
2199 	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
2200 }
2201 
2202 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
2203 {
2204 	struct cpsw_slave *slave;
2205 	struct cpsw_common *cpsw = priv->cpsw;
2206 	u32 ctrl, mtype;
2207 
2208 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2209 
2210 	ctrl = slave_read(slave, CPSW2_CONTROL);
2211 	switch (cpsw->version) {
2212 	case CPSW_VERSION_2:
2213 		ctrl &= ~CTRL_V2_ALL_TS_MASK;
2214 
2215 		if (priv->tx_ts_enabled)
2216 			ctrl |= CTRL_V2_TX_TS_BITS;
2217 
2218 		if (priv->rx_ts_enabled)
2219 			ctrl |= CTRL_V2_RX_TS_BITS;
2220 		break;
2221 	case CPSW_VERSION_3:
2222 	default:
2223 		ctrl &= ~CTRL_V3_ALL_TS_MASK;
2224 
2225 		if (priv->tx_ts_enabled)
2226 			ctrl |= CTRL_V3_TX_TS_BITS;
2227 
2228 		if (priv->rx_ts_enabled)
2229 			ctrl |= CTRL_V3_RX_TS_BITS;
2230 		break;
2231 	}
2232 
2233 	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
2234 
2235 	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
2236 	slave_write(slave, ctrl, CPSW2_CONTROL);
2237 	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
2238 	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
2239 }
2240 
2241 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2242 {
2243 	struct cpsw_priv *priv = netdev_priv(dev);
2244 	struct hwtstamp_config cfg;
2245 	struct cpsw_common *cpsw = priv->cpsw;
2246 
2247 	if (cpsw->version != CPSW_VERSION_1 &&
2248 	    cpsw->version != CPSW_VERSION_2 &&
2249 	    cpsw->version != CPSW_VERSION_3)
2250 		return -EOPNOTSUPP;
2251 
2252 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2253 		return -EFAULT;
2254 
2255 	/* reserved for future extensions */
2256 	if (cfg.flags)
2257 		return -EINVAL;
2258 
2259 	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
2260 		return -ERANGE;
2261 
2262 	switch (cfg.rx_filter) {
2263 	case HWTSTAMP_FILTER_NONE:
2264 		priv->rx_ts_enabled = 0;
2265 		break;
2266 	case HWTSTAMP_FILTER_ALL:
2267 	case HWTSTAMP_FILTER_NTP_ALL:
2268 		return -ERANGE;
2269 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2270 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2271 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2272 		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2273 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2274 		break;
2275 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2276 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2277 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2278 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2279 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2280 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2281 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2282 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2283 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2284 		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2285 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2286 		break;
2287 	default:
2288 		return -ERANGE;
2289 	}
2290 
2291 	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
2292 
2293 	switch (cpsw->version) {
2294 	case CPSW_VERSION_1:
2295 		cpsw_hwtstamp_v1(priv);
2296 		break;
2297 	case CPSW_VERSION_2:
2298 	case CPSW_VERSION_3:
2299 		cpsw_hwtstamp_v2(priv);
2300 		break;
2301 	default:
2302 		WARN_ON(1);
2303 	}
2304 
2305 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2306 }
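
/* A sketch of driving this path from userspace via SIOCSHWTSTAMP; the
 * interface name and filter choice are illustrative (sock_fd is any
 * open socket fd):
 *
 *   struct hwtstamp_config cfg = { 0 };
 *   struct ifreq ifr = { 0 };
 *
 *   cfg.tx_type = HWTSTAMP_TX_ON;
 *   cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */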
2307 
2308 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2309 {
2310 	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
2311 	struct cpsw_priv *priv = netdev_priv(dev);
2312 	struct hwtstamp_config cfg;
2313 
2314 	if (cpsw->version != CPSW_VERSION_1 &&
2315 	    cpsw->version != CPSW_VERSION_2 &&
2316 	    cpsw->version != CPSW_VERSION_3)
2317 		return -EOPNOTSUPP;
2318 
2319 	cfg.flags = 0;
2320 	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2321 	cfg.rx_filter = priv->rx_ts_enabled;
2322 
2323 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2324 }
2325 #else
2326 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2327 {
2328 	return -EOPNOTSUPP;
2329 }
2330 
2331 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2332 {
2333 	return -EOPNOTSUPP;
2334 }
2335 #endif /*CONFIG_TI_CPTS*/
2336 
2337 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2338 {
2339 	struct cpsw_priv *priv = netdev_priv(dev);
2340 	struct cpsw_common *cpsw = priv->cpsw;
2341 	int slave_no = cpsw_slave_index(cpsw, priv);
2342 
2343 	if (!netif_running(dev))
2344 		return -EINVAL;
2345 
2346 	switch (cmd) {
2347 	case SIOCSHWTSTAMP:
2348 		return cpsw_hwtstamp_set(dev, req);
2349 	case SIOCGHWTSTAMP:
2350 		return cpsw_hwtstamp_get(dev, req);
2351 	}
2352 
2353 	if (!cpsw->slaves[slave_no].phy)
2354 		return -EOPNOTSUPP;
2355 	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
2356 }
2357 
2358 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
2359 {
2360 	struct cpsw_priv *priv = netdev_priv(ndev);
2361 	struct cpsw_common *cpsw = priv->cpsw;
2362 	int ch;
2363 
2364 	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
2365 	ndev->stats.tx_errors++;
2366 	cpsw_intr_disable(cpsw);
2367 	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
2368 		cpdma_chan_stop(cpsw->txv[ch].ch);
2369 		cpdma_chan_start(cpsw->txv[ch].ch);
2370 	}
2371 
2372 	cpsw_intr_enable(cpsw);
2373 	netif_trans_update(ndev);
2374 	netif_tx_wake_all_queues(ndev);
2375 }
2376 
2377 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
2378 {
2379 	struct cpsw_priv *priv = netdev_priv(ndev);
2380 	struct sockaddr *addr = (struct sockaddr *)p;
2381 	struct cpsw_common *cpsw = priv->cpsw;
2382 	int flags = 0;
2383 	u16 vid = 0;
2384 	int ret;
2385 
2386 	if (!is_valid_ether_addr(addr->sa_data))
2387 		return -EADDRNOTAVAIL;
2388 
2389 	ret = pm_runtime_get_sync(cpsw->dev);
2390 	if (ret < 0) {
2391 		pm_runtime_put_noidle(cpsw->dev);
2392 		return ret;
2393 	}
2394 
2395 	if (cpsw->data.dual_emac) {
2396 		vid = cpsw->slaves[priv->emac_port].port_vlan;
2397 		flags = ALE_VLAN;
2398 	}
2399 
2400 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
2401 			   flags, vid);
2402 	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
2403 			   flags, vid);
2404 
2405 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
2406 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2407 	for_each_slave(priv, cpsw_set_slave_mac, priv);
2408 
2409 	pm_runtime_put(cpsw->dev);
2410 
2411 	return 0;
2412 }
2413 
2414 #ifdef CONFIG_NET_POLL_CONTROLLER
2415 static void cpsw_ndo_poll_controller(struct net_device *ndev)
2416 {
2417 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2418 
2419 	cpsw_intr_disable(cpsw);
2420 	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2421 	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
2422 	cpsw_intr_enable(cpsw);
2423 }
2424 #endif
2425 
2426 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
2427 				unsigned short vid)
2428 {
2429 	int ret;
2430 	int unreg_mcast_mask = 0;
2431 	int mcast_mask;
2432 	u32 port_mask;
2433 	struct cpsw_common *cpsw = priv->cpsw;
2434 
2435 	if (cpsw->data.dual_emac) {
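		/* slave ports are numbered from 1 (the host port is 0), so
		 * limit the VLAN to this slave's port plus the host port
		 */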
2436 		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
2437 
2438 		mcast_mask = ALE_PORT_HOST;
2439 		if (priv->ndev->flags & IFF_ALLMULTI)
2440 			unreg_mcast_mask = mcast_mask;
2441 	} else {
2442 		port_mask = ALE_ALL_PORTS;
2443 		mcast_mask = port_mask;
2444 
2445 		if (priv->ndev->flags & IFF_ALLMULTI)
2446 			unreg_mcast_mask = ALE_ALL_PORTS;
2447 		else
2448 			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
2449 	}
2450 
2451 	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
2452 				unreg_mcast_mask);
2453 	if (ret != 0)
2454 		return ret;
2455 
2456 	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
2457 				 HOST_PORT_NUM, ALE_VLAN, vid);
2458 	if (ret != 0)
2459 		goto clean_vid;
2460 
2461 	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
2462 				 mcast_mask, ALE_VLAN, vid, 0);
2463 	if (ret != 0)
2464 		goto clean_vlan_ucast;
2465 	return 0;
2466 
2467 clean_vlan_ucast:
2468 	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2469 			   HOST_PORT_NUM, ALE_VLAN, vid);
2470 clean_vid:
2471 	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2472 	return ret;
2473 }
2474 
2475 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
2476 				    __be16 proto, u16 vid)
2477 {
2478 	struct cpsw_priv *priv = netdev_priv(ndev);
2479 	struct cpsw_common *cpsw = priv->cpsw;
2480 	int ret;
2481 
2482 	if (vid == cpsw->data.default_vlan)
2483 		return 0;
2484 
2485 	ret = pm_runtime_get_sync(cpsw->dev);
2486 	if (ret < 0) {
2487 		pm_runtime_put_noidle(cpsw->dev);
2488 		return ret;
2489 	}
2490 
2491 	if (cpsw->data.dual_emac) {
2492 		/* In dual EMAC, reserved VLAN id should not be used for
2493 		 * creating VLAN interfaces as this can break the dual
2494 		 * EMAC port separation
2495 		 */
2496 		int i;
2497 
2498 		for (i = 0; i < cpsw->data.slaves; i++) {
2499 			if (vid == cpsw->slaves[i].port_vlan) {
2500 				ret = -EINVAL;
2501 				goto err;
2502 			}
2503 		}
2504 	}
2505 
2506 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
2507 	ret = cpsw_add_vlan_ale_entry(priv, vid);
2508 err:
2509 	pm_runtime_put(cpsw->dev);
2510 	return ret;
2511 }
2512 
2513 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
2514 				     __be16 proto, u16 vid)
2515 {
2516 	struct cpsw_priv *priv = netdev_priv(ndev);
2517 	struct cpsw_common *cpsw = priv->cpsw;
2518 	int ret;
2519 
2520 	if (vid == cpsw->data.default_vlan)
2521 		return 0;
2522 
2523 	ret = pm_runtime_get_sync(cpsw->dev);
2524 	if (ret < 0) {
2525 		pm_runtime_put_noidle(cpsw->dev);
2526 		return ret;
2527 	}
2528 
2529 	if (cpsw->data.dual_emac) {
2530 		int i;
2531 
2532 		for (i = 0; i < cpsw->data.slaves; i++) {
2533 			if (vid == cpsw->slaves[i].port_vlan)
2534 				goto err;
2535 		}
2536 	}
2537 
2538 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
2539 	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2540 	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2541 				  HOST_PORT_NUM, ALE_VLAN, vid);
2542 	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2543 				  0, ALE_VLAN, vid);
2544 	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
2545 err:
2546 	pm_runtime_put(cpsw->dev);
2547 	return ret;
2548 }
2549 
2550 static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
2551 {
2552 	struct cpsw_priv *priv = netdev_priv(ndev);
2553 	struct cpsw_common *cpsw = priv->cpsw;
2554 	struct cpsw_slave *slave;
2555 	u32 min_rate;
2556 	u32 ch_rate;
2557 	int i, ret;
2558 
2559 	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
2560 	if (ch_rate == rate)
2561 		return 0;
2562 
2563 	ch_rate = rate * 1000;
2564 	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
2565 	if (ch_rate < min_rate && ch_rate) {
2566 		dev_err(priv->dev, "The channel rate cannot be less than %dKbps",
2567 			min_rate);
2568 		return -EINVAL;
2569 	}
2570 
2571 	if (rate > cpsw->speed) {
2572 		dev_err(priv->dev, "The channel rate cannot be more than %dMbps", cpsw->speed);
2573 		return -EINVAL;
2574 	}
2575 
2576 	ret = pm_runtime_get_sync(cpsw->dev);
2577 	if (ret < 0) {
2578 		pm_runtime_put_noidle(cpsw->dev);
2579 		return ret;
2580 	}
2581 
2582 	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
2583 	pm_runtime_put(cpsw->dev);
2584 
2585 	if (ret)
2586 		return ret;
2587 
2588 	/* update rates for the slave tx queues */
2589 	for (i = 0; i < cpsw->data.slaves; i++) {
2590 		slave = &cpsw->slaves[i];
2591 		if (!slave->ndev)
2592 			continue;
2593 
2594 		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
2595 	}
2596 
2597 	cpsw_split_res(ndev);
2598 	return ret;
2599 }
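
/* This hook is reached from sysfs; a usage sketch (interface name and
 * rate are illustrative, the rate is in Mbit/s):
 *
 *   echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */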
2600 
2601 static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
2602 {
2603 	struct tc_mqprio_qopt_offload *mqprio = type_data;
2604 	struct cpsw_priv *priv = netdev_priv(ndev);
2605 	struct cpsw_common *cpsw = priv->cpsw;
2606 	int fifo, num_tc, count, offset;
2607 	struct cpsw_slave *slave;
2608 	u32 tx_prio_map = 0;
2609 	int i, tc, ret;
2610 
2611 	num_tc = mqprio->qopt.num_tc;
2612 	if (num_tc > CPSW_TC_NUM)
2613 		return -EINVAL;
2614 
2615 	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
2616 		return -EINVAL;
2617 
2618 	ret = pm_runtime_get_sync(cpsw->dev);
2619 	if (ret < 0) {
2620 		pm_runtime_put_noidle(cpsw->dev);
2621 		return ret;
2622 	}
2623 
2624 	if (num_tc) {
2625 		for (i = 0; i < 8; i++) {
2626 			tc = mqprio->qopt.prio_tc_map[i];
2627 			fifo = cpsw_tc_to_fifo(tc, num_tc);
2628 			tx_prio_map |= fifo << (4 * i);
2629 		}
2630 
2631 		netdev_set_num_tc(ndev, num_tc);
2632 		for (i = 0; i < num_tc; i++) {
2633 			count = mqprio->qopt.count[i];
2634 			offset = mqprio->qopt.offset[i];
2635 			netdev_set_tc_queue(ndev, i, count, offset);
2636 		}
2637 	}
2638 
2639 	if (!mqprio->qopt.hw) {
2640 		/* restore default configuration */
2641 		netdev_reset_tc(ndev);
2642 		tx_prio_map = TX_PRIORITY_MAPPING;
2643 	}
2644 
2645 	priv->mqprio_hw = mqprio->qopt.hw;
2646 
2647 	offset = cpsw->version == CPSW_VERSION_1 ?
2648 		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
2649 
2650 	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2651 	slave_write(slave, tx_prio_map, offset);
2652 
2653 	pm_runtime_put_sync(cpsw->dev);
2654 
2655 	return 0;
2656 }
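
/* A minimal mqprio offload sketch (all values illustrative); "hw 1"
 * selects the default DCB mode and makes the stack call
 * ndo_setup_tc(TC_SETUP_QDISC_MQPRIO), handled above:
 *
 *   tc qdisc replace dev eth0 root handle 100: mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 queues 1@0 1@1 1@2 hw 1
 */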
2657 
2658 static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2659 			     void *type_data)
2660 {
2661 	switch (type) {
2662 	case TC_SETUP_QDISC_CBS:
2663 		return cpsw_set_cbs(ndev, type_data);
2664 
2665 	case TC_SETUP_QDISC_MQPRIO:
2666 		return cpsw_set_mqprio(ndev, type_data);
2667 
2668 	default:
2669 		return -EOPNOTSUPP;
2670 	}
2671 }
2672 
2673 static const struct net_device_ops cpsw_netdev_ops = {
2674 	.ndo_open		= cpsw_ndo_open,
2675 	.ndo_stop		= cpsw_ndo_stop,
2676 	.ndo_start_xmit		= cpsw_ndo_start_xmit,
2677 	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
2678 	.ndo_do_ioctl		= cpsw_ndo_ioctl,
2679 	.ndo_validate_addr	= eth_validate_addr,
2680 	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
2681 	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
2682 	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
2683 #ifdef CONFIG_NET_POLL_CONTROLLER
2684 	.ndo_poll_controller	= cpsw_ndo_poll_controller,
2685 #endif
2686 	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
2687 	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
2688 	.ndo_setup_tc           = cpsw_ndo_setup_tc,
2689 };
2690 
2691 static int cpsw_get_regs_len(struct net_device *ndev)
2692 {
2693 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2694 
2695 	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
2696 }
2697 
2698 static void cpsw_get_regs(struct net_device *ndev,
2699 			  struct ethtool_regs *regs, void *p)
2700 {
2701 	u32 *reg = p;
2702 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2703 
2704 	/* update CPSW IP version */
2705 	regs->version = cpsw->version;
2706 
2707 	cpsw_ale_dump(cpsw->ale, reg);
2708 }
2709 
2710 static void cpsw_get_drvinfo(struct net_device *ndev,
2711 			     struct ethtool_drvinfo *info)
2712 {
2713 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2714 	struct platform_device	*pdev = to_platform_device(cpsw->dev);
2715 
2716 	strlcpy(info->driver, "cpsw", sizeof(info->driver));
2717 	strlcpy(info->version, "1.0", sizeof(info->version));
2718 	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
2719 }
2720 
2721 static u32 cpsw_get_msglevel(struct net_device *ndev)
2722 {
2723 	struct cpsw_priv *priv = netdev_priv(ndev);
2724 	return priv->msg_enable;
2725 }
2726 
2727 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
2728 {
2729 	struct cpsw_priv *priv = netdev_priv(ndev);
2730 	priv->msg_enable = value;
2731 }
2732 
2733 #if IS_ENABLED(CONFIG_TI_CPTS)
2734 static int cpsw_get_ts_info(struct net_device *ndev,
2735 			    struct ethtool_ts_info *info)
2736 {
2737 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2738 
2739 	info->so_timestamping =
2740 		SOF_TIMESTAMPING_TX_HARDWARE |
2741 		SOF_TIMESTAMPING_TX_SOFTWARE |
2742 		SOF_TIMESTAMPING_RX_HARDWARE |
2743 		SOF_TIMESTAMPING_RX_SOFTWARE |
2744 		SOF_TIMESTAMPING_SOFTWARE |
2745 		SOF_TIMESTAMPING_RAW_HARDWARE;
2746 	info->phc_index = cpsw->cpts->phc_index;
2747 	info->tx_types =
2748 		(1 << HWTSTAMP_TX_OFF) |
2749 		(1 << HWTSTAMP_TX_ON);
2750 	info->rx_filters =
2751 		(1 << HWTSTAMP_FILTER_NONE) |
2752 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2753 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2754 	return 0;
2755 }
2756 #else
2757 static int cpsw_get_ts_info(struct net_device *ndev,
2758 			    struct ethtool_ts_info *info)
2759 {
2760 	info->so_timestamping =
2761 		SOF_TIMESTAMPING_TX_SOFTWARE |
2762 		SOF_TIMESTAMPING_RX_SOFTWARE |
2763 		SOF_TIMESTAMPING_SOFTWARE;
2764 	info->phc_index = -1;
2765 	info->tx_types = 0;
2766 	info->rx_filters = 0;
2767 	return 0;
2768 }
2769 #endif
2770 
2771 static int cpsw_get_link_ksettings(struct net_device *ndev,
2772 				   struct ethtool_link_ksettings *ecmd)
2773 {
2774 	struct cpsw_priv *priv = netdev_priv(ndev);
2775 	struct cpsw_common *cpsw = priv->cpsw;
2776 	int slave_no = cpsw_slave_index(cpsw, priv);
2777 
2778 	if (!cpsw->slaves[slave_no].phy)
2779 		return -EOPNOTSUPP;
2780 
2781 	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
2782 	return 0;
2783 }
2784 
2785 static int cpsw_set_link_ksettings(struct net_device *ndev,
2786 				   const struct ethtool_link_ksettings *ecmd)
2787 {
2788 	struct cpsw_priv *priv = netdev_priv(ndev);
2789 	struct cpsw_common *cpsw = priv->cpsw;
2790 	int slave_no = cpsw_slave_index(cpsw, priv);
2791 
2792 	if (cpsw->slaves[slave_no].phy)
2793 		return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
2794 						 ecmd);
2795 	else
2796 		return -EOPNOTSUPP;
2797 }
2798 
2799 static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2800 {
2801 	struct cpsw_priv *priv = netdev_priv(ndev);
2802 	struct cpsw_common *cpsw = priv->cpsw;
2803 	int slave_no = cpsw_slave_index(cpsw, priv);
2804 
2805 	wol->supported = 0;
2806 	wol->wolopts = 0;
2807 
2808 	if (cpsw->slaves[slave_no].phy)
2809 		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
2810 }
2811 
2812 static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2813 {
2814 	struct cpsw_priv *priv = netdev_priv(ndev);
2815 	struct cpsw_common *cpsw = priv->cpsw;
2816 	int slave_no = cpsw_slave_index(cpsw, priv);
2817 
2818 	if (cpsw->slaves[slave_no].phy)
2819 		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
2820 	else
2821 		return -EOPNOTSUPP;
2822 }
2823 
2824 static void cpsw_get_pauseparam(struct net_device *ndev,
2825 				struct ethtool_pauseparam *pause)
2826 {
2827 	struct cpsw_priv *priv = netdev_priv(ndev);
2828 
2829 	pause->autoneg = AUTONEG_DISABLE;
2830 	pause->rx_pause = priv->rx_pause ? true : false;
2831 	pause->tx_pause = priv->tx_pause ? true : false;
2832 }
2833 
2834 static int cpsw_set_pauseparam(struct net_device *ndev,
2835 			       struct ethtool_pauseparam *pause)
2836 {
2837 	struct cpsw_priv *priv = netdev_priv(ndev);
2838 	bool link;
2839 
2840 	priv->rx_pause = pause->rx_pause ? true : false;
2841 	priv->tx_pause = pause->tx_pause ? true : false;
2842 
2843 	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
2844 	return 0;
2845 }
2846 
2847 static int cpsw_ethtool_op_begin(struct net_device *ndev)
2848 {
2849 	struct cpsw_priv *priv = netdev_priv(ndev);
2850 	struct cpsw_common *cpsw = priv->cpsw;
2851 	int ret;
2852 
2853 	ret = pm_runtime_get_sync(cpsw->dev);
2854 	if (ret < 0) {
2855 		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
2856 		pm_runtime_put_noidle(cpsw->dev);
2857 	}
2858 
2859 	return ret;
2860 }
2861 
2862 static void cpsw_ethtool_op_complete(struct net_device *ndev)
2863 {
2864 	struct cpsw_priv *priv = netdev_priv(ndev);
2865 	int ret;
2866 
2867 	ret = pm_runtime_put(priv->cpsw->dev);
2868 	if (ret < 0)
2869 		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
2870 }
2871 
2872 static void cpsw_get_channels(struct net_device *ndev,
2873 			      struct ethtool_channels *ch)
2874 {
2875 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2876 
2877 	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
2878 	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
2879 	ch->max_combined = 0;
2880 	ch->max_other = 0;
2881 	ch->other_count = 0;
2882 	ch->rx_count = cpsw->rx_ch_num;
2883 	ch->tx_count = cpsw->tx_ch_num;
2884 	ch->combined_count = 0;
2885 }
2886 
2887 static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
2888 				  struct ethtool_channels *ch)
2889 {
2890 	if (cpsw->quirk_irq) {
2891 		dev_err(cpsw->dev, "At most one tx and one rx queue is allowed");
2892 		return -EOPNOTSUPP;
2893 	}
2894 
2895 	if (ch->combined_count)
2896 		return -EINVAL;
2897 
2898 	/* verify we have at least one channel in each direction */
2899 	if (!ch->rx_count || !ch->tx_count)
2900 		return -EINVAL;
2901 
2902 	if (ch->rx_count > cpsw->data.channels ||
2903 	    ch->tx_count > cpsw->data.channels)
2904 		return -EINVAL;
2905 
2906 	return 0;
2907 }
2908 
2909 static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
2910 {
2911 	struct cpsw_common *cpsw = priv->cpsw;
2912 	void (*handler)(void *, int, int);
2913 	struct netdev_queue *queue;
2914 	struct cpsw_vector *vec;
2915 	int ret, *ch, vch;
2916 
2917 	if (rx) {
2918 		ch = &cpsw->rx_ch_num;
2919 		vec = cpsw->rxv;
2920 		handler = cpsw_rx_handler;
2921 	} else {
2922 		ch = &cpsw->tx_ch_num;
2923 		vec = cpsw->txv;
2924 		handler = cpsw_tx_handler;
2925 	}
2926 
2927 	while (*ch < ch_num) {
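		/* rx channels are created in natural order, while tx channels
		 * are created from the top (7 - ch) down, since CPDMA rate
		 * limited channels must occupy the highest channel numbers
		 */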
2928 		vch = rx ? *ch : 7 - *ch;
2929 		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
2930 		queue = netdev_get_tx_queue(priv->ndev, *ch);
2931 		queue->tx_maxrate = 0;
2932 
2933 		if (IS_ERR(vec[*ch].ch))
2934 			return PTR_ERR(vec[*ch].ch);
2935 
2936 		if (!vec[*ch].ch)
2937 			return -EINVAL;
2938 
2939 		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
2940 			  (rx ? "rx" : "tx"));
2941 		(*ch)++;
2942 	}
2943 
2944 	while (*ch > ch_num) {
2945 		(*ch)--;
2946 
2947 		ret = cpdma_chan_destroy(vec[*ch].ch);
2948 		if (ret)
2949 			return ret;
2950 
2951 		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
2952 			  (rx ? "rx" : "tx"));
2953 	}
2954 
2955 	return 0;
2956 }
2957 
2958 static int cpsw_update_channels(struct cpsw_priv *priv,
2959 				struct ethtool_channels *ch)
2960 {
2961 	int ret;
2962 
2963 	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
2964 	if (ret)
2965 		return ret;
2966 
2967 	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
2968 	if (ret)
2969 		return ret;
2970 
2971 	return 0;
2972 }
2973 
2974 static void cpsw_suspend_data_pass(struct net_device *ndev)
2975 {
2976 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2977 	struct cpsw_slave *slave;
2978 	int i;
2979 
2980 	/* Disable NAPI scheduling */
2981 	cpsw_intr_disable(cpsw);
2982 
2983 	/* Stop all transmit queues for every network device.
2984 	 * Disable re-use of rx descriptors by marking the devices dormant.
2985 	 */
2986 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
2987 		if (!(slave->ndev && netif_running(slave->ndev)))
2988 			continue;
2989 
2990 		netif_tx_stop_all_queues(slave->ndev);
2991 		netif_dormant_on(slave->ndev);
2992 	}
2993 
2994 	/* Handle rest of tx packets and stop cpdma channels */
2995 	cpdma_ctlr_stop(cpsw->dma);
2996 }
2997 
2998 static int cpsw_resume_data_pass(struct net_device *ndev)
2999 {
3000 	struct cpsw_priv *priv = netdev_priv(ndev);
3001 	struct cpsw_common *cpsw = priv->cpsw;
3002 	struct cpsw_slave *slave;
3003 	int i, ret;
3004 
3005 	/* Allow rx packets handling */
3006 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
3007 		if (slave->ndev && netif_running(slave->ndev))
3008 			netif_dormant_off(slave->ndev);
3009 
3010 	/* Receive is started after this point */
3011 	if (cpsw->usage_count) {
3012 		ret = cpsw_fill_rx_channels(priv);
3013 		if (ret)
3014 			return ret;
3015 
3016 		cpdma_ctlr_start(cpsw->dma);
3017 		cpsw_intr_enable(cpsw);
3018 	}
3019 
3020 	/* Resume transmit for every affected interface */
3021 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
3022 		if (slave->ndev && netif_running(slave->ndev))
3023 			netif_tx_start_all_queues(slave->ndev);
3024 
3025 	return 0;
3026 }
3027 
3028 static int cpsw_set_channels(struct net_device *ndev,
3029 			     struct ethtool_channels *chs)
3030 {
3031 	struct cpsw_priv *priv = netdev_priv(ndev);
3032 	struct cpsw_common *cpsw = priv->cpsw;
3033 	struct cpsw_slave *slave;
3034 	int i, ret;
3035 
3036 	ret = cpsw_check_ch_settings(cpsw, chs);
3037 	if (ret < 0)
3038 		return ret;
3039 
3040 	cpsw_suspend_data_pass(ndev);
3041 	ret = cpsw_update_channels(priv, chs);
3042 	if (ret)
3043 		goto err;
3044 
3045 	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
3046 		if (!(slave->ndev && netif_running(slave->ndev)))
3047 			continue;
3048 
3049 		/* Inform stack about new count of queues */
3050 		ret = netif_set_real_num_tx_queues(slave->ndev,
3051 						   cpsw->tx_ch_num);
3052 		if (ret) {
3053 			dev_err(priv->dev, "cannot set real number of tx queues\n");
3054 			goto err;
3055 		}
3056 
3057 		ret = netif_set_real_num_rx_queues(slave->ndev,
3058 						   cpsw->rx_ch_num);
3059 		if (ret) {
3060 			dev_err(priv->dev, "cannot set real number of rx queues\n");
3061 			goto err;
3062 		}
3063 	}
3064 
3065 	if (cpsw->usage_count)
3066 		cpsw_split_res(ndev);
3067 
3068 	ret = cpsw_resume_data_pass(ndev);
3069 	if (!ret)
3070 		return 0;
3071 err:
3072 	dev_err(priv->dev, "cannot update channels number, closing device\n");
3073 	dev_close(ndev);
3074 	return ret;
3075 }
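
/* Reached via "ethtool -L", e.g. (illustrative):
 *
 *   ethtool -L eth0 rx 4 tx 4
 *
 * Data passing is suspended while the cpdma channels are recreated.
 */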
3076 
3077 static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
3078 {
3079 	struct cpsw_priv *priv = netdev_priv(ndev);
3080 	struct cpsw_common *cpsw = priv->cpsw;
3081 	int slave_no = cpsw_slave_index(cpsw, priv);
3082 
3083 	if (cpsw->slaves[slave_no].phy)
3084 		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
3085 	else
3086 		return -EOPNOTSUPP;
3087 }
3088 
3089 static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
3090 {
3091 	struct cpsw_priv *priv = netdev_priv(ndev);
3092 	struct cpsw_common *cpsw = priv->cpsw;
3093 	int slave_no = cpsw_slave_index(cpsw, priv);
3094 
3095 	if (cpsw->slaves[slave_no].phy)
3096 		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
3097 	else
3098 		return -EOPNOTSUPP;
3099 }
3100 
3101 static int cpsw_nway_reset(struct net_device *ndev)
3102 {
3103 	struct cpsw_priv *priv = netdev_priv(ndev);
3104 	struct cpsw_common *cpsw = priv->cpsw;
3105 	int slave_no = cpsw_slave_index(cpsw, priv);
3106 
3107 	if (cpsw->slaves[slave_no].phy)
3108 		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
3109 	else
3110 		return -EOPNOTSUPP;
3111 }
3112 
3113 static void cpsw_get_ringparam(struct net_device *ndev,
3114 			       struct ethtool_ringparam *ering)
3115 {
3116 	struct cpsw_priv *priv = netdev_priv(ndev);
3117 	struct cpsw_common *cpsw = priv->cpsw;
3118 
3119 	/* tx ring resize is not supported */
3120 	ering->tx_max_pending = 0;
3121 	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
3122 	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
3123 	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
3124 }
3125 
3126 static int cpsw_set_ringparam(struct net_device *ndev,
3127 			      struct ethtool_ringparam *ering)
3128 {
3129 	struct cpsw_priv *priv = netdev_priv(ndev);
3130 	struct cpsw_common *cpsw = priv->cpsw;
3131 	int ret;
3132 
3133 	/* ignore ering->tx_pending - only rx_pending adjustment is supported */
3134 
3135 	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
3136 	    ering->rx_pending < CPSW_MAX_QUEUES ||
3137 	    ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
3138 		return -EINVAL;
3139 
3140 	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
3141 		return 0;
3142 
3143 	cpsw_suspend_data_pass(ndev);
3144 
3145 	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
3146 
3147 	if (cpsw->usage_count)
3148 		cpdma_chan_split_pool(cpsw->dma);
3149 
3150 	ret = cpsw_resume_data_pass(ndev);
3151 	if (!ret)
3152 		return 0;
3153 
3154 	dev_err(&ndev->dev, "cannot set ring params, closing device\n");
3155 	dev_close(ndev);
3156 	return ret;
3157 }
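
/* Reached via "ethtool -G"; only the rx ring can be resized here,
 * e.g. (illustrative):
 *
 *   ethtool -G eth0 rx 1024
 */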
3158 
3159 static const struct ethtool_ops cpsw_ethtool_ops = {
3160 	.get_drvinfo	= cpsw_get_drvinfo,
3161 	.get_msglevel	= cpsw_get_msglevel,
3162 	.set_msglevel	= cpsw_set_msglevel,
3163 	.get_link	= ethtool_op_get_link,
3164 	.get_ts_info	= cpsw_get_ts_info,
3165 	.get_coalesce	= cpsw_get_coalesce,
3166 	.set_coalesce	= cpsw_set_coalesce,
3167 	.get_sset_count		= cpsw_get_sset_count,
3168 	.get_strings		= cpsw_get_strings,
3169 	.get_ethtool_stats	= cpsw_get_ethtool_stats,
3170 	.get_pauseparam		= cpsw_get_pauseparam,
3171 	.set_pauseparam		= cpsw_set_pauseparam,
3172 	.get_wol	= cpsw_get_wol,
3173 	.set_wol	= cpsw_set_wol,
3174 	.get_regs_len	= cpsw_get_regs_len,
3175 	.get_regs	= cpsw_get_regs,
3176 	.begin		= cpsw_ethtool_op_begin,
3177 	.complete	= cpsw_ethtool_op_complete,
3178 	.get_channels	= cpsw_get_channels,
3179 	.set_channels	= cpsw_set_channels,
3180 	.get_link_ksettings	= cpsw_get_link_ksettings,
3181 	.set_link_ksettings	= cpsw_set_link_ksettings,
3182 	.get_eee	= cpsw_get_eee,
3183 	.set_eee	= cpsw_set_eee,
3184 	.nway_reset	= cpsw_nway_reset,
3185 	.get_ringparam = cpsw_get_ringparam,
3186 	.set_ringparam = cpsw_set_ringparam,
3187 };
3188 
3189 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
3190 			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
3191 {
3192 	void __iomem		*regs = cpsw->regs;
3193 	int			slave_num = slave->slave_num;
3194 	struct cpsw_slave_data	*data = cpsw->data.slave_data + slave_num;
3195 
3196 	slave->data	= data;
3197 	slave->regs	= regs + slave_reg_ofs;
3198 	slave->sliver	= regs + sliver_reg_ofs;
3199 	slave->port_vlan = data->dual_emac_res_vlan;
3200 }
3201 
3202 static int cpsw_probe_dt(struct cpsw_platform_data *data,
3203 			 struct platform_device *pdev)
3204 {
3205 	struct device_node *node = pdev->dev.of_node;
3206 	struct device_node *slave_node;
3207 	int i = 0, ret;
3208 	u32 prop;
3209 
3210 	if (!node)
3211 		return -EINVAL;
3212 
3213 	if (of_property_read_u32(node, "slaves", &prop)) {
3214 		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
3215 		return -EINVAL;
3216 	}
3217 	data->slaves = prop;
3218 
3219 	if (of_property_read_u32(node, "active_slave", &prop)) {
3220 		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
3221 		return -EINVAL;
3222 	}
3223 	data->active_slave = prop;
3224 
3225 	data->slave_data = devm_kcalloc(&pdev->dev,
3226 					data->slaves,
3227 					sizeof(struct cpsw_slave_data),
3228 					GFP_KERNEL);
3229 	if (!data->slave_data)
3230 		return -ENOMEM;
3231 
3232 	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
3233 		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
3234 		return -EINVAL;
3235 	}
3236 	data->channels = prop;
3237 
3238 	if (of_property_read_u32(node, "ale_entries", &prop)) {
3239 		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
3240 		return -EINVAL;
3241 	}
3242 	data->ale_entries = prop;
3243 
3244 	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
3245 		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
3246 		return -EINVAL;
3247 	}
3248 	data->bd_ram_size = prop;
3249 
3250 	if (of_property_read_u32(node, "mac_control", &prop)) {
3251 		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
3252 		return -EINVAL;
3253 	}
3254 	data->mac_control = prop;
3255 
3256 	if (of_property_read_bool(node, "dual_emac"))
3257 		data->dual_emac = 1;
3258 
3259 	/*
3260 	 * Populate all the child nodes here...
3261 	 */
3262 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3263 	/* We do not want to force this, as in some cases we may not have child nodes */
3264 	if (ret)
3265 		dev_warn(&pdev->dev, "Doesn't have any child node\n");
3266 
3267 	for_each_available_child_of_node(node, slave_node) {
3268 		struct cpsw_slave_data *slave_data = data->slave_data + i;
3269 		const void *mac_addr = NULL;
3270 		int lenp;
3271 		const __be32 *parp;
3272 
3273 		/* This is not a slave child node, continue */
3274 		if (!of_node_name_eq(slave_node, "slave"))
3275 			continue;
3276 
3277 		slave_data->phy_node = of_parse_phandle(slave_node,
3278 							"phy-handle", 0);
3279 		parp = of_get_property(slave_node, "phy_id", &lenp);
3280 		if (slave_data->phy_node) {
3281 			dev_dbg(&pdev->dev,
3282 				"slave[%d] using phy-handle=\"%pOF\"\n",
3283 				i, slave_data->phy_node);
3284 		} else if (of_phy_is_fixed_link(slave_node)) {
3285 			/* In the case of a fixed PHY, the DT node associated
3286 			 * to the PHY is the Ethernet MAC DT node.
3287 			 */
3288 			ret = of_phy_register_fixed_link(slave_node);
3289 			if (ret) {
3290 				if (ret != -EPROBE_DEFER)
3291 					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
3292 				return ret;
3293 			}
3294 			slave_data->phy_node = of_node_get(slave_node);
3295 		} else if (parp) {
3296 			u32 phyid;
3297 			struct device_node *mdio_node;
3298 			struct platform_device *mdio;
3299 
3300 			if (lenp != (sizeof(__be32) * 2)) {
3301 				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
3302 				goto no_phy_slave;
3303 			}
3304 			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
3305 			phyid = be32_to_cpup(parp+1);
3306 			mdio = of_find_device_by_node(mdio_node);
3307 			of_node_put(mdio_node);
3308 			if (!mdio) {
3309 				dev_err(&pdev->dev, "Missing mdio platform device\n");
3310 				return -EINVAL;
3311 			}
3312 			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
3313 				 PHY_ID_FMT, mdio->name, phyid);
3314 			put_device(&mdio->dev);
3315 		} else {
3316 			dev_err(&pdev->dev,
3317 				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
3318 				i);
3319 			goto no_phy_slave;
3320 		}
3321 		slave_data->phy_if = of_get_phy_mode(slave_node);
3322 		if (slave_data->phy_if < 0) {
3323 			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
3324 				i);
3325 			return slave_data->phy_if;
3326 		}
3327 
3328 no_phy_slave:
3329 		mac_addr = of_get_mac_address(slave_node);
3330 		if (mac_addr) {
3331 			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
3332 		} else {
3333 			ret = ti_cm_get_macid(&pdev->dev, i,
3334 					      slave_data->mac_addr);
3335 			if (ret)
3336 				return ret;
3337 		}
3338 		if (data->dual_emac) {
3339 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
3340 						 &prop)) {
3341 				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
3342 				slave_data->dual_emac_res_vlan = i+1;
3343 				slave_data->dual_emac_res_vlan = i + 1;
3344 				dev_err(&pdev->dev, "Using %d as reserved VLAN for slave %d\n",
3345 			} else {
3346 				slave_data->dual_emac_res_vlan = prop;
3347 			}
3348 		}
3349 
3350 		i++;
3351 		if (i == data->slaves)
3352 			break;
3353 	}
3354 
3355 	return 0;
3356 }
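
/* A trimmed DT sketch of the properties parsed above; values, labels
 * and the phy-handle target are illustrative, not a complete binding:
 *
 *   mac: ethernet@4a100000 {
 *           compatible = "ti,cpsw";
 *           slaves = <2>;
 *           active_slave = <0>;
 *           cpdma_channels = <8>;
 *           ale_entries = <1024>;
 *           bd_ram_size = <0x2000>;
 *           mac_control = <0x20>;
 *           dual_emac;
 *
 *           cpsw_emac0: slave@200 {
 *                   phy-handle = <&ethphy0>;
 *                   phy-mode = "rgmii-txid";
 *                   dual_emac_res_vlan = <1>;
 *           };
 *   };
 */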
3357 
3358 static void cpsw_remove_dt(struct platform_device *pdev)
3359 {
3360 	struct net_device *ndev = platform_get_drvdata(pdev);
3361 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3362 	struct cpsw_platform_data *data = &cpsw->data;
3363 	struct device_node *node = pdev->dev.of_node;
3364 	struct device_node *slave_node;
3365 	int i = 0;
3366 
3367 	for_each_available_child_of_node(node, slave_node) {
3368 		struct cpsw_slave_data *slave_data = &data->slave_data[i];
3369 
3370 		if (!of_node_name_eq(slave_node, "slave"))
3371 			continue;
3372 
3373 		if (of_phy_is_fixed_link(slave_node))
3374 			of_phy_deregister_fixed_link(slave_node);
3375 
3376 		of_node_put(slave_data->phy_node);
3377 
3378 		i++;
3379 		if (i == data->slaves)
3380 			break;
3381 	}
3382 
3383 	of_platform_depopulate(&pdev->dev);
3384 }
3385 
3386 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
3387 {
3388 	struct cpsw_common		*cpsw = priv->cpsw;
3389 	struct cpsw_platform_data	*data = &cpsw->data;
3390 	struct net_device		*ndev;
3391 	struct cpsw_priv		*priv_sl2;
3392 	int ret = 0;
3393 
3394 	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
3395 	if (!ndev) {
3396 		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
3397 		return -ENOMEM;
3398 	}
3399 
3400 	priv_sl2 = netdev_priv(ndev);
3401 	priv_sl2->cpsw = cpsw;
3402 	priv_sl2->ndev = ndev;
3403 	priv_sl2->dev  = &ndev->dev;
3404 	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
3405 
3406 	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
3407 		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
3408 			ETH_ALEN);
3409 		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
3410 			 priv_sl2->mac_addr);
3411 	} else {
3412 		eth_random_addr(priv_sl2->mac_addr);
3413 		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
3414 			 priv_sl2->mac_addr);
3415 	}
3416 	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
3417 
3418 	priv_sl2->emac_port = 1;
3419 	cpsw->slaves[1].ndev = ndev;
3420 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
3421 
3422 	ndev->netdev_ops = &cpsw_netdev_ops;
3423 	ndev->ethtool_ops = &cpsw_ethtool_ops;
3424 
3425 	/* register the network device */
3426 	SET_NETDEV_DEV(ndev, cpsw->dev);
3427 	ret = register_netdev(ndev);
3428 	if (ret) {
3429 		dev_err(cpsw->dev, "cpsw: error registering net device\n");
3430 		free_netdev(ndev);
3431 		ret = -ENODEV;
3432 	}
3433 
3434 	return ret;
3435 }
3436 
3437 static const struct of_device_id cpsw_of_mtable[] = {
3438 	{ .compatible = "ti,cpsw"},
3439 	{ .compatible = "ti,am335x-cpsw"},
3440 	{ .compatible = "ti,am4372-cpsw"},
3441 	{ .compatible = "ti,dra7-cpsw"},
3442 	{ /* sentinel */ },
3443 };
3444 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
3445 
3446 static const struct soc_device_attribute cpsw_soc_devices[] = {
3447 	{ .family = "AM33xx", .revision = "ES1.0"},
3448 	{ /* sentinel */ }
3449 };
3450 
3451 static int cpsw_probe(struct platform_device *pdev)
3452 {
3453 	struct clk			*clk;
3454 	struct cpsw_platform_data	*data;
3455 	struct net_device		*ndev;
3456 	struct cpsw_priv		*priv;
3457 	struct cpdma_params		dma_params;
3458 	struct cpsw_ale_params		ale_params;
3459 	void __iomem			*ss_regs;
3460 	void __iomem			*cpts_regs;
3461 	struct resource			*res, *ss_res;
3462 	struct gpio_descs		*mode;
3463 	u32 slave_offset, sliver_offset, slave_size;
3464 	const struct soc_device_attribute *soc;
3465 	struct cpsw_common		*cpsw;
3466 	int ret = 0, i, ch;
3467 	int irq;
3468 
3469 	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
3470 	if (!cpsw)
3471 		return -ENOMEM;
3472 
3473 	cpsw->dev = &pdev->dev;
3474 
3475 	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
3476 	if (!ndev) {
3477 		dev_err(&pdev->dev, "error allocating net_device\n");
3478 		return -ENOMEM;
3479 	}
3480 
3481 	platform_set_drvdata(pdev, ndev);
3482 	priv = netdev_priv(ndev);
3483 	priv->cpsw = cpsw;
3484 	priv->ndev = ndev;
3485 	priv->dev  = &ndev->dev;
3486 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
3487 	cpsw->rx_packet_max = max(rx_packet_max, 128);
3488 
3489 	mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
3490 	if (IS_ERR(mode)) {
3491 		ret = PTR_ERR(mode);
3492 		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
3493 		goto clean_ndev_ret;
3494 	}
3495 
3496 	/*
3497 	 * Runtime PM may be required here for the child devices.
3498 	 */
3499 	pm_runtime_enable(&pdev->dev);
3500 
3501 	/* Select default pin state */
3502 	pinctrl_pm_select_default_state(&pdev->dev);
3503 
3504 	/* Need to enable clocks with runtime PM api to access module
3505 	 * registers
3506 	 */
3507 	ret = pm_runtime_get_sync(&pdev->dev);
3508 	if (ret < 0) {
3509 		pm_runtime_put_noidle(&pdev->dev);
3510 		goto clean_runtime_disable_ret;
3511 	}
3512 
3513 	ret = cpsw_probe_dt(&cpsw->data, pdev);
3514 	if (ret)
3515 		goto clean_dt_ret;
3516 
3517 	data = &cpsw->data;
3518 	cpsw->rx_ch_num = 1;
3519 	cpsw->tx_ch_num = 1;
3520 
3521 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
3522 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
3523 		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
3524 	} else {
3525 		eth_random_addr(priv->mac_addr);
3526 		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
3527 	}
3528 
3529 	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
3530 
3531 	cpsw->slaves = devm_kcalloc(&pdev->dev,
3532 				    data->slaves, sizeof(struct cpsw_slave),
3533 				    GFP_KERNEL);
3534 	if (!cpsw->slaves) {
3535 		ret = -ENOMEM;
3536 		goto clean_dt_ret;
3537 	}
3538 	for (i = 0; i < data->slaves; i++)
3539 		cpsw->slaves[i].slave_num = i;
3540 
3541 	cpsw->slaves[0].ndev = ndev;
3542 	priv->emac_port = 0;
3543 
3544 	clk = devm_clk_get(&pdev->dev, "fck");
3545 	if (IS_ERR(clk)) {
3546 		dev_err(priv->dev, "fck is not found\n");
3547 		ret = -ENODEV;
3548 		goto clean_dt_ret;
3549 	}
3550 	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
3551 
3552 	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3553 	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
3554 	if (IS_ERR(ss_regs)) {
3555 		ret = PTR_ERR(ss_regs);
3556 		goto clean_dt_ret;
3557 	}
3558 	cpsw->regs = ss_regs;
3559 
3560 	cpsw->version = readl(&cpsw->regs->id_ver);
3561 
3562 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3563 	cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
3564 	if (IS_ERR(cpsw->wr_regs)) {
3565 		ret = PTR_ERR(cpsw->wr_regs);
3566 		goto clean_dt_ret;
3567 	}
3568 
3569 	memset(&dma_params, 0, sizeof(dma_params));
3570 	memset(&ale_params, 0, sizeof(ale_params));
3571 
3572 	switch (cpsw->version) {
3573 	case CPSW_VERSION_1:
3574 		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
3575 		cpts_regs		= ss_regs + CPSW1_CPTS_OFFSET;
3576 		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
3577 		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
3578 		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
3579 		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
3580 		slave_offset         = CPSW1_SLAVE_OFFSET;
3581 		slave_size           = CPSW1_SLAVE_SIZE;
3582 		sliver_offset        = CPSW1_SLIVER_OFFSET;
3583 		dma_params.desc_mem_phys = 0;
3584 		break;
3585 	case CPSW_VERSION_2:
3586 	case CPSW_VERSION_3:
3587 	case CPSW_VERSION_4:
3588 		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
3589 		cpts_regs		= ss_regs + CPSW2_CPTS_OFFSET;
3590 		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
3591 		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
3592 		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
3593 		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
3594 		slave_offset         = CPSW2_SLAVE_OFFSET;
3595 		slave_size           = CPSW2_SLAVE_SIZE;
3596 		sliver_offset        = CPSW2_SLIVER_OFFSET;
3597 		dma_params.desc_mem_phys =
3598 			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
3599 		break;
3600 	default:
3601 		dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
3602 		ret = -ENODEV;
3603 		goto clean_dt_ret;
3604 	}
3605 	for (i = 0; i < cpsw->data.slaves; i++) {
3606 		struct cpsw_slave *slave = &cpsw->slaves[i];
3607 
3608 		cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
3609 		slave_offset  += slave_size;
3610 		sliver_offset += SLIVER_SIZE;
3611 	}
3612 
3613 	dma_params.dev		= &pdev->dev;
3614 	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
3615 	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
3616 	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
3617 	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
3618 	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;
3619 
3620 	dma_params.num_chan		= data->channels;
3621 	dma_params.has_soft_reset	= true;
3622 	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
3623 	dma_params.desc_mem_size	= data->bd_ram_size;
3624 	dma_params.desc_align		= 16;
3625 	dma_params.has_ext_regs		= true;
3626 	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
3627 	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
3628 	dma_params.descs_pool_size	= descs_pool_size;
3629 
3630 	cpsw->dma = cpdma_ctlr_create(&dma_params);
3631 	if (!cpsw->dma) {
3632 		dev_err(priv->dev, "error initializing dma\n");
3633 		ret = -ENOMEM;
3634 		goto clean_dt_ret;
3635 	}
3636 
3637 	soc = soc_device_match(cpsw_soc_devices);
3638 	if (soc)
3639 		cpsw->quirk_irq = 1;
3640 
3641 	ch = cpsw->quirk_irq ? 0 : 7;
3642 	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
3643 	if (IS_ERR(cpsw->txv[0].ch)) {
3644 		dev_err(priv->dev, "error initializing tx dma channel\n");
3645 		ret = PTR_ERR(cpsw->txv[0].ch);
3646 		goto clean_dma_ret;
3647 	}
3648 
3649 	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
3650 	if (IS_ERR(cpsw->rxv[0].ch)) {
3651 		dev_err(priv->dev, "error initializing rx dma channel\n");
3652 		ret = PTR_ERR(cpsw->rxv[0].ch);
3653 		goto clean_dma_ret;
3654 	}
3655 
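	/* ALE (Address Lookup Engine): the switch's forwarding and
	 * filtering table, covering the host port plus the two slave
	 * ports.
	 */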
3656 	ale_params.dev			= &pdev->dev;
3657 	ale_params.ale_ageout		= ale_ageout;
3658 	ale_params.ale_entries		= data->ale_entries;
3659 	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
3660 
3661 	cpsw->ale = cpsw_ale_create(&ale_params);
3662 	if (!cpsw->ale) {
3663 		dev_err(priv->dev, "error initializing ale engine\n");
3664 		ret = -ENODEV;
3665 		goto clean_dma_ret;
3666 	}
3667 
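	/* CPTS (Common Platform Time Sync) provides the hardware
	 * timestamping used for PTP; its registers were located in the
	 * version switch above.
	 */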
3668 	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
3669 	if (IS_ERR(cpsw->cpts)) {
3670 		ret = PTR_ERR(cpsw->cpts);
3671 		goto clean_dma_ret;
3672 	}
3673 
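	/* CPSW exposes four IRQs, in resource order: RX_THRESHOLD, RX,
	 * TX and MISC.  The RX IRQ (index 1) doubles as the legacy
	 * ndev->irq.
	 */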
3674 	ndev->irq = platform_get_irq(pdev, 1);
3675 	if (ndev->irq < 0) {
3676 		dev_err(priv->dev, "error getting irq resource\n");
3677 		ret = ndev->irq;
3678 		goto clean_cpts;
3679 	}
3680 
3681 	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
3682 
3683 	ndev->netdev_ops = &cpsw_netdev_ops;
3684 	ndev->ethtool_ops = &cpsw_ethtool_ops;
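	/* Separate NAPI contexts for RX and TX; the quirk variants poll
	 * a single channel, the _mq variants service all channels.
	 */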
3685 	netif_napi_add(ndev, &cpsw->napi_rx,
3686 		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
3687 		       CPSW_POLL_WEIGHT);
3688 	netif_tx_napi_add(ndev, &cpsw->napi_tx,
3689 			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
3690 			  CPSW_POLL_WEIGHT);
3691 	cpsw_split_res(ndev);
3692 
3693 	/* register the network device */
3694 	SET_NETDEV_DEV(ndev, &pdev->dev);
3695 	ret = register_netdev(ndev);
3696 	if (ret) {
3697 		dev_err(priv->dev, "error registering net device\n");
3698 		ret = -ENODEV;
3699 		goto clean_cpts;
3700 	}
3701 
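	/* In dual EMAC mode the second slave port is exposed as its own
	 * net_device instead of being switched with the first.
	 */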
3702 	if (cpsw->data.dual_emac) {
3703 		ret = cpsw_probe_dual_emac(priv);
3704 		if (ret) {
3705 			cpsw_err(priv, probe, "error probing slave 2 emac interface\n");
3706 			goto clean_unregister_netdev_ret;
3707 		}
3708 	}
3709 
3710 	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
3711 	 * MISC IRQs, which are always kept disabled by this driver, so
3712 	 * we do not request them.
3713 	 *
3714 	 * If anyone wants to implement support for those, make sure to
3715 	 * first request them and append them to the irqs_table array.
3716 	 */
3717 
3718 	/* RX IRQ */
3719 	irq = platform_get_irq(pdev, 1);
3720 	if (irq < 0) {
3721 		ret = irq;
3722 		goto clean_cpts;
3723 	}
3724 
3725 	cpsw->irqs_table[0] = irq;
3726 	ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
3727 			       0, dev_name(&pdev->dev), cpsw);
3728 	if (ret < 0) {
3729 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
3730 		goto clean_cpts;
3731 	}
3732 
3733 	/* TX IRQ */
3734 	irq = platform_get_irq(pdev, 2);
3735 	if (irq < 0) {
3736 		ret = irq;
3737 		goto clean_cpts;
3738 	}
3739 
3740 	cpsw->irqs_table[1] = irq;
3741 	ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
3742 			       0, dev_name(&pdev->dev), cpsw);
3743 	if (ret < 0) {
3744 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
3745 		goto clean_cpts;
3746 	}
3747 
3748 	cpsw_notice(priv, probe,
3749 		    "initialized device (regs %pa, irq %d, pool size %d)\n",
3750 		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);
3751 
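	/* Balances the pm_runtime_get_sync() taken earlier in probe;
	 * the interface's own open/close manages runtime PM from here.
	 */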
3752 	pm_runtime_put(&pdev->dev);
3753 
3754 	return 0;
3755 
3756 clean_unregister_netdev_ret:
3757 	unregister_netdev(ndev);
clean_cpts:
	cpts_release(cpsw->cpts);
3758 clean_dma_ret:
3759 	cpdma_ctlr_destroy(cpsw->dma);
3760 clean_dt_ret:
3761 	cpsw_remove_dt(pdev);
3762 	pm_runtime_put_sync(&pdev->dev);
3763 clean_runtime_disable_ret:
3764 	pm_runtime_disable(&pdev->dev);
3765 clean_ndev_ret:
3766 	free_netdev(priv->ndev);
3767 	return ret;
3768 }
3769 
3770 static int cpsw_remove(struct platform_device *pdev)
3771 {
3772 	struct net_device *ndev = platform_get_drvdata(pdev);
3773 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3774 	int ret;
3775 
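	/* Resume the hardware so that the teardown below can touch
	 * registers safely.
	 */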
3776 	ret = pm_runtime_get_sync(&pdev->dev);
3777 	if (ret < 0) {
3778 		pm_runtime_put_noidle(&pdev->dev);
3779 		return ret;
3780 	}
3781 
3782 	if (cpsw->data.dual_emac)
3783 		unregister_netdev(cpsw->slaves[1].ndev);
3784 	unregister_netdev(ndev);
3785 
3786 	cpts_release(cpsw->cpts);
3787 	cpdma_ctlr_destroy(cpsw->dma);
3788 	cpsw_remove_dt(pdev);
3789 	pm_runtime_put_sync(&pdev->dev);
3790 	pm_runtime_disable(&pdev->dev);
3791 	if (cpsw->data.dual_emac)
3792 		free_netdev(cpsw->slaves[1].ndev);
3793 	free_netdev(ndev);
3794 	return 0;
3795 }
3796 
3797 #ifdef CONFIG_PM_SLEEP
3798 static int cpsw_suspend(struct device *dev)
3799 {
3800 	struct net_device	*ndev = dev_get_drvdata(dev);
3801 	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
3802 
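	/* Stop every running interface so DMA and interrupts are quiet
	 * before the pins are put to sleep.
	 */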
3803 	if (cpsw->data.dual_emac) {
3804 		int i;
3805 
3806 		for (i = 0; i < cpsw->data.slaves; i++) {
3807 			if (netif_running(cpsw->slaves[i].ndev))
3808 				cpsw_ndo_stop(cpsw->slaves[i].ndev);
3809 		}
3810 	} else {
3811 		if (netif_running(ndev))
3812 			cpsw_ndo_stop(ndev);
3813 	}
3814 
3815 	/* Select sleep pin state */
3816 	pinctrl_pm_select_sleep_state(dev);
3817 
3818 	return 0;
3819 }
3820 
3821 static int cpsw_resume(struct device *dev)
3822 {
3823 	struct net_device	*ndev = dev_get_drvdata(dev);
3824 	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
3825 
3826 	/* Select default pin state */
3827 	pinctrl_pm_select_default_state(dev);
3828 
3829 	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
3830 	rtnl_lock();
3831 	if (cpsw->data.dual_emac) {
3832 		int i;
3833 
3834 		for (i = 0; i < cpsw->data.slaves; i++) {
3835 			if (netif_running(cpsw->slaves[i].ndev))
3836 				cpsw_ndo_open(cpsw->slaves[i].ndev);
3837 		}
3838 	} else {
3839 		if (netif_running(ndev))
3840 			cpsw_ndo_open(ndev);
3841 	}
3842 	rtnl_unlock();
3843 
3844 	return 0;
3845 }
3846 #endif
3847 
3848 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
3849 
3850 static struct platform_driver cpsw_driver = {
3851 	.driver = {
3852 		.name	 = "cpsw",
3853 		.pm	 = &cpsw_pm_ops,
3854 		.of_match_table = cpsw_of_mtable,
3855 	},
3856 	.probe = cpsw_probe,
3857 	.remove = cpsw_remove,
3858 };
3859 
3860 module_platform_driver(cpsw_driver);
3861 
3862 MODULE_LICENSE("GPL");
3863 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
3864 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
3865 MODULE_DESCRIPTION("TI CPSW Ethernet driver");
3866