1 /*
2  * Keystone GBE and XGBE subsystem code
3  *
4  * Copyright (C) 2014 Texas Instruments Incorporated
5  * Authors:	Sandeep Nair <sandeep_n@ti.com>
6  *		Sandeep Paulraj <s-paulraj@ti.com>
7  *		Cyril Chemparathy <cyril@ti.com>
8  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *		Wingman Kwok <w-kwok2@ti.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation version 2.
14  *
15  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16  * kind, whether express or implied; without even the implied warranty
17  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  */
20 
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/of_mdio.h>
24 #include <linux/of_net.h>
25 #include <linux/of_address.h>
26 #include <linux/if_vlan.h>
27 #include <linux/ptp_classify.h>
28 #include <linux/net_tstamp.h>
29 #include <linux/ethtool.h>
30 
31 #include "cpsw.h"
32 #include "cpsw_ale.h"
33 #include "netcp.h"
34 #include "cpts.h"
35 
36 #define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
37 #define NETCP_DRIVER_VERSION		"v1.0"
38 
39 #define GBE_IDENT(reg)			(((reg) >> 16) & 0xffff)
40 #define GBE_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
41 #define GBE_MINOR_VERSION(reg)		((reg) & 0xff)
42 #define GBE_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)
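
/* For illustration only (the id_ver value below is made up): a reading of
 * 0x4ed21102 would decode as
 *
 *	GBE_IDENT(0x4ed21102)		= 0x4ed2  (subsystem identifier)
 *	GBE_RTL_VERSION(0x4ed21102)	= 0x2
 *	GBE_MAJOR_VERSION(0x4ed21102)	= 0x1
 *	GBE_MINOR_VERSION(0x4ed21102)	= 0x02
 *
 * The identifier field is what IS_SS_ID_VER_14(), IS_SS_ID_NU() and
 * IS_SS_ID_2U() below compare against.
 */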
43 
44 /* 1G Ethernet SS defines */
45 #define GBE_MODULE_NAME			"netcp-gbe"
46 #define GBE_SS_VERSION_14		0x4ed2
47 
48 #define GBE_SS_REG_INDEX		0
49 #define GBE_SGMII34_REG_INDEX		1
50 #define GBE_SM_REG_INDEX		2
51 /* offset relative to base of GBE_SS_REG_INDEX */
52 #define GBE13_SGMII_MODULE_OFFSET	0x100
53 /* offset relative to base of GBE_SM_REG_INDEX */
54 #define GBE13_HOST_PORT_OFFSET		0x34
55 #define GBE13_SLAVE_PORT_OFFSET		0x60
56 #define GBE13_EMAC_OFFSET		0x100
57 #define GBE13_SLAVE_PORT2_OFFSET	0x200
58 #define GBE13_HW_STATS_OFFSET		0x300
59 #define GBE13_CPTS_OFFSET		0x500
60 #define GBE13_ALE_OFFSET		0x600
61 #define GBE13_HOST_PORT_NUM		0
62 #define GBE13_NUM_ALE_ENTRIES		1024
63 
64 /* 1G Ethernet NU SS defines */
65 #define GBENU_MODULE_NAME		"netcp-gbenu"
66 #define GBE_SS_ID_NU			0x4ee6
67 #define GBE_SS_ID_2U			0x4ee8
68 
69 #define IS_SS_ID_MU(d) \
70 	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
71 	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
72 
73 #define IS_SS_ID_NU(d) \
74 	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
75 
76 #define IS_SS_ID_VER_14(d) \
77 	(GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
78 #define IS_SS_ID_2U(d) \
79 	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
80 
81 #define GBENU_SS_REG_INDEX		0
82 #define GBENU_SM_REG_INDEX		1
83 #define GBENU_SGMII_MODULE_OFFSET	0x100
84 #define GBENU_HOST_PORT_OFFSET		0x1000
85 #define GBENU_SLAVE_PORT_OFFSET		0x2000
86 #define GBENU_EMAC_OFFSET		0x2330
87 #define GBENU_HW_STATS_OFFSET		0x1a000
88 #define GBENU_CPTS_OFFSET		0x1d000
89 #define GBENU_ALE_OFFSET		0x1e000
90 #define GBENU_HOST_PORT_NUM		0
91 #define GBENU_SGMII_MODULE_SIZE		0x100
92 
93 /* 10G Ethernet SS defines */
94 #define XGBE_MODULE_NAME		"netcp-xgbe"
95 #define XGBE_SS_VERSION_10		0x4ee4
96 
97 #define XGBE_SS_REG_INDEX		0
98 #define XGBE_SM_REG_INDEX		1
99 #define XGBE_SERDES_REG_INDEX		2
100 
101 #define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
102 /* offset relative to base of XGBE_SS_REG_INDEX */
103 #define XGBE10_SGMII_MODULE_OFFSET	0x100
104 /* offset relative to base of XGBE_SM_REG_INDEX */
105 #define XGBE10_HOST_PORT_OFFSET		0x34
106 #define XGBE10_SLAVE_PORT_OFFSET	0x64
107 #define XGBE10_EMAC_OFFSET		0x400
108 #define XGBE10_CPTS_OFFSET		0x600
109 #define XGBE10_ALE_OFFSET		0x700
110 #define XGBE10_HW_STATS_OFFSET		0x800
111 #define XGBE10_HOST_PORT_NUM		0
112 #define XGBE10_NUM_ALE_ENTRIES		2048
113 
114 #define	GBE_TIMER_INTERVAL			(HZ / 2)
115 
116 /* Soft reset register values */
117 #define SOFT_RESET_MASK				BIT(0)
118 #define SOFT_RESET				BIT(0)
119 #define DEVICE_EMACSL_RESET_POLL_COUNT		100
120 #define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
121 
122 #define MACSL_RX_ENABLE_CSF			BIT(23)
123 #define MACSL_ENABLE_EXT_CTL			BIT(18)
124 #define MACSL_XGMII_ENABLE			BIT(13)
125 #define MACSL_XGIG_MODE				BIT(8)
126 #define MACSL_GIG_MODE				BIT(7)
127 #define MACSL_GMII_ENABLE			BIT(5)
128 #define MACSL_FULLDUPLEX			BIT(0)
129 
130 #define GBE_CTL_P0_ENABLE			BIT(2)
131 #define ETH_SW_CTL_P0_TX_CRC_REMOVE		BIT(13)
132 #define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
133 #define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
134 #define GBE_STATS_CD_SEL			BIT(28)
135 
136 #define GBE_PORT_MASK(x)			(BIT(x) - 1)
137 #define GBE_MASK_NO_PORTS			0
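
/* GBE_PORT_MASK(x) gives a mask with the x lowest port bits set, e.g.
 * GBE_PORT_MASK(4) == 0xf (ports 0-3).
 */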
138 
139 #define GBE_DEF_1G_MAC_CONTROL					\
140 		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
141 		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
142 
143 #define GBE_DEF_10G_MAC_CONTROL					\
144 		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |	\
145 		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
146 
147 #define GBE_STATSA_MODULE			0
148 #define GBE_STATSB_MODULE			1
149 #define GBE_STATSC_MODULE			2
150 #define GBE_STATSD_MODULE			3
151 
152 #define GBENU_STATS0_MODULE			0
153 #define GBENU_STATS1_MODULE			1
154 #define GBENU_STATS2_MODULE			2
155 #define GBENU_STATS3_MODULE			3
156 #define GBENU_STATS4_MODULE			4
157 #define GBENU_STATS5_MODULE			5
158 #define GBENU_STATS6_MODULE			6
159 #define GBENU_STATS7_MODULE			7
160 #define GBENU_STATS8_MODULE			8
161 
162 #define XGBE_STATS0_MODULE			0
163 #define XGBE_STATS1_MODULE			1
164 #define XGBE_STATS2_MODULE			2
165 
166 /* s: 0-based slave_port */
167 #define SGMII_BASE(d, s) \
168 	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
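
/* For example, SGMII_BASE(gbe_dev, 0) and SGMII_BASE(gbe_dev, 1) resolve to
 * gbe_dev->sgmii_port_regs, while SGMII_BASE(gbe_dev, 2) and
 * SGMII_BASE(gbe_dev, 3) resolve to gbe_dev->sgmii_port34_regs.
 */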
169 
170 #define GBE_TX_QUEUE				648
171 #define	GBE_TXHOOK_ORDER			0
172 #define	GBE_RXHOOK_ORDER			0
173 #define GBE_DEFAULT_ALE_AGEOUT			30
174 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
175 #define SLAVE_LINK_IS_RGMII(s) \
176 	(((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
177 	 ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
178 #define SLAVE_LINK_IS_SGMII(s) \
179 	((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
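
/* Note: the SLAVE_LINK_IS_*() range checks above rely on the relative
 * ordering of the link_interface constants in netcp.h (SGMII_* values below
 * RGMII_*, which are below XGMII_*); keep that ordering when adding new
 * interface types.
 */
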
180 #define NETCP_LINK_STATE_INVALID		-1
181 
182 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
183 		offsetof(struct gbe##_##rb, rn)
184 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
185 		offsetof(struct gbenu##_##rb, rn)
186 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
187 		offsetof(struct xgbe##_##rb, rn)
188 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
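
/* How the accessor macros above compose, for instance:
 *
 *	GBE_SET_REG_OFS(slave, emac_regs, mac_control)
 *
 * expands to
 *
 *	slave->emac_regs_ofs.mac_control =
 *			offsetof(struct gbe_emac_regs, mac_control);
 *
 * after which GBE_REG_ADDR(slave, emac_regs, mac_control) evaluates to
 * slave->emac_regs + slave->emac_regs_ofs.mac_control, i.e. the mapped
 * address of that register for the subsystem variant in use.
 */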
189 
190 #define HOST_TX_PRI_MAP_DEFAULT			0x00000000
191 
192 #if IS_ENABLED(CONFIG_TI_CPTS)
193 /* Px_TS_CTL register fields */
194 #define TS_RX_ANX_F_EN				BIT(0)
195 #define TS_RX_VLAN_LT1_EN			BIT(1)
196 #define TS_RX_VLAN_LT2_EN			BIT(2)
197 #define TS_RX_ANX_D_EN				BIT(3)
198 #define TS_TX_ANX_F_EN				BIT(4)
199 #define TS_TX_VLAN_LT1_EN			BIT(5)
200 #define TS_TX_VLAN_LT2_EN			BIT(6)
201 #define TS_TX_ANX_D_EN				BIT(7)
202 #define TS_LT2_EN				BIT(8)
203 #define TS_RX_ANX_E_EN				BIT(9)
204 #define TS_TX_ANX_E_EN				BIT(10)
205 #define TS_MSG_TYPE_EN_SHIFT			16
206 #define TS_MSG_TYPE_EN_MASK			0xffff
207 
208 /* Px_TS_SEQ_LTYPE register fields */
209 #define TS_SEQ_ID_OFS_SHIFT			16
210 #define TS_SEQ_ID_OFS_MASK			0x3f
211 
212 /* Px_TS_CTL_LTYPE2 register fields */
213 #define TS_107					BIT(16)
214 #define TS_129					BIT(17)
215 #define TS_130					BIT(18)
216 #define TS_131					BIT(19)
217 #define TS_132					BIT(20)
218 #define TS_319					BIT(21)
219 #define TS_320					BIT(22)
220 #define TS_TTL_NONZERO				BIT(23)
221 #define TS_UNI_EN				BIT(24)
222 #define TS_UNI_EN_SHIFT				24
223 
224 #define TS_TX_ANX_ALL_EN	 \
225 	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
226 
227 #define TS_RX_ANX_ALL_EN	 \
228 	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
229 
230 #define TS_CTL_DST_PORT				TS_319
231 #define TS_CTL_DST_PORT_SHIFT			21
232 
233 #define TS_CTL_MADDR_ALL	\
234 	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
235 
236 #define TS_CTL_MADDR_SHIFT			16
237 
238 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
239 #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
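
/* Illustrative only (not a quote of the code further down): enabling
 * timestamping of all four event messages in Px_TS_CTL typically looks like
 *
 *	ts_en |= EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
 *
 * which places the message-type mask in bits 31:16 of the register.
 */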
240 #endif /* CONFIG_TI_CPTS */
241 
242 struct xgbe_ss_regs {
243 	u32	id_ver;
244 	u32	synce_count;
245 	u32	synce_mux;
246 	u32	control;
247 };
248 
249 struct xgbe_switch_regs {
250 	u32	id_ver;
251 	u32	control;
252 	u32	emcontrol;
253 	u32	stat_port_en;
254 	u32	ptype;
255 	u32	soft_idle;
256 	u32	thru_rate;
257 	u32	gap_thresh;
258 	u32	tx_start_wds;
259 	u32	flow_control;
260 	u32	cppi_thresh;
261 };
262 
263 struct xgbe_port_regs {
264 	u32	blk_cnt;
265 	u32	port_vlan;
266 	u32	tx_pri_map;
267 	u32	sa_lo;
268 	u32	sa_hi;
269 	u32	ts_ctl;
270 	u32	ts_seq_ltype;
271 	u32	ts_vlan;
272 	u32	ts_ctl_ltype2;
273 	u32	ts_ctl2;
274 	u32	control;
275 };
276 
277 struct xgbe_host_port_regs {
278 	u32	blk_cnt;
279 	u32	port_vlan;
280 	u32	tx_pri_map;
281 	u32	src_id;
282 	u32	rx_pri_map;
283 	u32	rx_maxlen;
284 };
285 
286 struct xgbe_emac_regs {
287 	u32	id_ver;
288 	u32	mac_control;
289 	u32	mac_status;
290 	u32	soft_reset;
291 	u32	rx_maxlen;
292 	u32	__reserved_0;
293 	u32	rx_pause;
294 	u32	tx_pause;
295 	u32	em_control;
296 	u32	__reserved_1;
297 	u32	tx_gap;
298 	u32	rsvd[4];
299 };
300 
301 struct xgbe_host_hw_stats {
302 	u32	rx_good_frames;
303 	u32	rx_broadcast_frames;
304 	u32	rx_multicast_frames;
305 	u32	__rsvd_0[3];
306 	u32	rx_oversized_frames;
307 	u32	__rsvd_1;
308 	u32	rx_undersized_frames;
309 	u32	__rsvd_2;
310 	u32	overrun_type4;
311 	u32	overrun_type5;
312 	u32	rx_bytes;
313 	u32	tx_good_frames;
314 	u32	tx_broadcast_frames;
315 	u32	tx_multicast_frames;
316 	u32	__rsvd_3[9];
317 	u32	tx_bytes;
318 	u32	tx_64byte_frames;
319 	u32	tx_65_to_127byte_frames;
320 	u32	tx_128_to_255byte_frames;
321 	u32	tx_256_to_511byte_frames;
322 	u32	tx_512_to_1023byte_frames;
323 	u32	tx_1024byte_frames;
324 	u32	net_bytes;
325 	u32	rx_sof_overruns;
326 	u32	rx_mof_overruns;
327 	u32	rx_dma_overruns;
328 };
329 
330 struct xgbe_hw_stats {
331 	u32	rx_good_frames;
332 	u32	rx_broadcast_frames;
333 	u32	rx_multicast_frames;
334 	u32	rx_pause_frames;
335 	u32	rx_crc_errors;
336 	u32	rx_align_code_errors;
337 	u32	rx_oversized_frames;
338 	u32	rx_jabber_frames;
339 	u32	rx_undersized_frames;
340 	u32	rx_fragments;
341 	u32	overrun_type4;
342 	u32	overrun_type5;
343 	u32	rx_bytes;
344 	u32	tx_good_frames;
345 	u32	tx_broadcast_frames;
346 	u32	tx_multicast_frames;
347 	u32	tx_pause_frames;
348 	u32	tx_deferred_frames;
349 	u32	tx_collision_frames;
350 	u32	tx_single_coll_frames;
351 	u32	tx_mult_coll_frames;
352 	u32	tx_excessive_collisions;
353 	u32	tx_late_collisions;
354 	u32	tx_underrun;
355 	u32	tx_carrier_sense_errors;
356 	u32	tx_bytes;
357 	u32	tx_64byte_frames;
358 	u32	tx_65_to_127byte_frames;
359 	u32	tx_128_to_255byte_frames;
360 	u32	tx_256_to_511byte_frames;
361 	u32	tx_512_to_1023byte_frames;
362 	u32	tx_1024byte_frames;
363 	u32	net_bytes;
364 	u32	rx_sof_overruns;
365 	u32	rx_mof_overruns;
366 	u32	rx_dma_overruns;
367 };
368 
369 struct gbenu_ss_regs {
370 	u32	id_ver;
371 	u32	synce_count;		/* NU */
372 	u32	synce_mux;		/* NU */
373 	u32	control;		/* 2U */
374 	u32	__rsvd_0[2];		/* 2U */
375 	u32	rgmii_status;		/* 2U */
376 	u32	ss_status;		/* 2U */
377 };
378 
379 struct gbenu_switch_regs {
380 	u32	id_ver;
381 	u32	control;
382 	u32	__rsvd_0[2];
383 	u32	emcontrol;
384 	u32	stat_port_en;
385 	u32	ptype;			/* NU */
386 	u32	soft_idle;
387 	u32	thru_rate;		/* NU */
388 	u32	gap_thresh;		/* NU */
389 	u32	tx_start_wds;		/* NU */
390 	u32	eee_prescale;		/* 2U */
391 	u32	tx_g_oflow_thresh_set;	/* NU */
392 	u32	tx_g_oflow_thresh_clr;	/* NU */
393 	u32	tx_g_buf_thresh_set_l;	/* NU */
394 	u32	tx_g_buf_thresh_set_h;	/* NU */
395 	u32	tx_g_buf_thresh_clr_l;	/* NU */
396 	u32	tx_g_buf_thresh_clr_h;	/* NU */
397 };
398 
399 struct gbenu_port_regs {
400 	u32	__rsvd_0;
401 	u32	control;
402 	u32	max_blks;		/* 2U */
403 	u32	mem_align1;
404 	u32	blk_cnt;
405 	u32	port_vlan;
406 	u32	tx_pri_map;		/* NU */
407 	u32	pri_ctl;		/* 2U */
408 	u32	rx_pri_map;
409 	u32	rx_maxlen;
410 	u32	tx_blks_pri;		/* NU */
411 	u32	__rsvd_1;
412 	u32	idle2lpi;		/* 2U */
413 	u32	lpi2idle;		/* 2U */
414 	u32	eee_status;		/* 2U */
415 	u32	__rsvd_2;
416 	u32	__rsvd_3[176];		/* NU: more to add */
417 	u32	__rsvd_4[2];
418 	u32	sa_lo;
419 	u32	sa_hi;
420 	u32	ts_ctl;
421 	u32	ts_seq_ltype;
422 	u32	ts_vlan;
423 	u32	ts_ctl_ltype2;
424 	u32	ts_ctl2;
425 };
426 
427 struct gbenu_host_port_regs {
428 	u32	__rsvd_0;
429 	u32	control;
430 	u32	flow_id_offset;		/* 2U */
431 	u32	__rsvd_1;
432 	u32	blk_cnt;
433 	u32	port_vlan;
434 	u32	tx_pri_map;		/* NU */
435 	u32	pri_ctl;
436 	u32	rx_pri_map;
437 	u32	rx_maxlen;
438 	u32	tx_blks_pri;		/* NU */
439 	u32	__rsvd_2;
440 	u32	idle2lpi;		/* 2U */
441 	u32	lpi2wake;		/* 2U */
442 	u32	eee_status;		/* 2U */
443 	u32	__rsvd_3;
444 	u32	__rsvd_4[184];		/* NU */
445 	u32	host_blks_pri;		/* NU */
446 };
447 
448 struct gbenu_emac_regs {
449 	u32	mac_control;
450 	u32	mac_status;
451 	u32	soft_reset;
452 	u32	boff_test;
453 	u32	rx_pause;
454 	u32	__rsvd_0[11];		/* NU */
455 	u32	tx_pause;
456 	u32	__rsvd_1[11];		/* NU */
457 	u32	em_control;
458 	u32	tx_gap;
459 };
460 
461 /* Some hw stat regs apply to the slave ports only; this is reflected in
462  * the gbenu_et_stats table below.  Likewise, some counters exist only on
463  * SS version NU and others only on 2U.
464  */
465 struct gbenu_hw_stats {
466 	u32	rx_good_frames;
467 	u32	rx_broadcast_frames;
468 	u32	rx_multicast_frames;
469 	u32	rx_pause_frames;		/* slave */
470 	u32	rx_crc_errors;
471 	u32	rx_align_code_errors;		/* slave */
472 	u32	rx_oversized_frames;
473 	u32	rx_jabber_frames;		/* slave */
474 	u32	rx_undersized_frames;
475 	u32	rx_fragments;			/* slave */
476 	u32	ale_drop;
477 	u32	ale_overrun_drop;
478 	u32	rx_bytes;
479 	u32	tx_good_frames;
480 	u32	tx_broadcast_frames;
481 	u32	tx_multicast_frames;
482 	u32	tx_pause_frames;		/* slave */
483 	u32	tx_deferred_frames;		/* slave */
484 	u32	tx_collision_frames;		/* slave */
485 	u32	tx_single_coll_frames;		/* slave */
486 	u32	tx_mult_coll_frames;		/* slave */
487 	u32	tx_excessive_collisions;	/* slave */
488 	u32	tx_late_collisions;		/* slave */
489 	u32	rx_ipg_error;			/* slave 10G only */
490 	u32	tx_carrier_sense_errors;	/* slave */
491 	u32	tx_bytes;
492 	u32	tx_64B_frames;
493 	u32	tx_65_to_127B_frames;
494 	u32	tx_128_to_255B_frames;
495 	u32	tx_256_to_511B_frames;
496 	u32	tx_512_to_1023B_frames;
497 	u32	tx_1024B_frames;
498 	u32	net_bytes;
499 	u32	rx_bottom_fifo_drop;
500 	u32	rx_port_mask_drop;
501 	u32	rx_top_fifo_drop;
502 	u32	ale_rate_limit_drop;
503 	u32	ale_vid_ingress_drop;
504 	u32	ale_da_eq_sa_drop;
505 	u32	__rsvd_0[3];
506 	u32	ale_unknown_ucast;
507 	u32	ale_unknown_ucast_bytes;
508 	u32	ale_unknown_mcast;
509 	u32	ale_unknown_mcast_bytes;
510 	u32	ale_unknown_bcast;
511 	u32	ale_unknown_bcast_bytes;
512 	u32	ale_pol_match;
513 	u32	ale_pol_match_red;		/* NU */
514 	u32	ale_pol_match_yellow;		/* NU */
515 	u32	__rsvd_1[44];
516 	u32	tx_mem_protect_err;
517 	/* following NU only */
518 	u32	tx_pri0;
519 	u32	tx_pri1;
520 	u32	tx_pri2;
521 	u32	tx_pri3;
522 	u32	tx_pri4;
523 	u32	tx_pri5;
524 	u32	tx_pri6;
525 	u32	tx_pri7;
526 	u32	tx_pri0_bcnt;
527 	u32	tx_pri1_bcnt;
528 	u32	tx_pri2_bcnt;
529 	u32	tx_pri3_bcnt;
530 	u32	tx_pri4_bcnt;
531 	u32	tx_pri5_bcnt;
532 	u32	tx_pri6_bcnt;
533 	u32	tx_pri7_bcnt;
534 	u32	tx_pri0_drop;
535 	u32	tx_pri1_drop;
536 	u32	tx_pri2_drop;
537 	u32	tx_pri3_drop;
538 	u32	tx_pri4_drop;
539 	u32	tx_pri5_drop;
540 	u32	tx_pri6_drop;
541 	u32	tx_pri7_drop;
542 	u32	tx_pri0_drop_bcnt;
543 	u32	tx_pri1_drop_bcnt;
544 	u32	tx_pri2_drop_bcnt;
545 	u32	tx_pri3_drop_bcnt;
546 	u32	tx_pri4_drop_bcnt;
547 	u32	tx_pri5_drop_bcnt;
548 	u32	tx_pri6_drop_bcnt;
549 	u32	tx_pri7_drop_bcnt;
550 };
551 
552 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
553 
554 struct gbe_ss_regs {
555 	u32	id_ver;
556 	u32	synce_count;
557 	u32	synce_mux;
558 };
559 
560 struct gbe_ss_regs_ofs {
561 	u16	id_ver;
562 	u16	control;
563 	u16	rgmii_status; /* 2U */
564 };
565 
566 struct gbe_switch_regs {
567 	u32	id_ver;
568 	u32	control;
569 	u32	soft_reset;
570 	u32	stat_port_en;
571 	u32	ptype;
572 	u32	soft_idle;
573 	u32	thru_rate;
574 	u32	gap_thresh;
575 	u32	tx_start_wds;
576 	u32	flow_control;
577 };
578 
579 struct gbe_switch_regs_ofs {
580 	u16	id_ver;
581 	u16	control;
582 	u16	soft_reset;
583 	u16	emcontrol;
584 	u16	stat_port_en;
585 	u16	ptype;
586 	u16	flow_control;
587 };
588 
589 struct gbe_port_regs {
590 	u32	max_blks;
591 	u32	blk_cnt;
592 	u32	port_vlan;
593 	u32	tx_pri_map;
594 	u32	sa_lo;
595 	u32	sa_hi;
596 	u32	ts_ctl;
597 	u32	ts_seq_ltype;
598 	u32	ts_vlan;
599 	u32	ts_ctl_ltype2;
600 	u32	ts_ctl2;
601 };
602 
603 struct gbe_port_regs_ofs {
604 	u16	port_vlan;
605 	u16	tx_pri_map;
606 	u16	rx_pri_map;
607 	u16	sa_lo;
608 	u16	sa_hi;
609 	u16	ts_ctl;
610 	u16	ts_seq_ltype;
611 	u16	ts_vlan;
612 	u16	ts_ctl_ltype2;
613 	u16	ts_ctl2;
614 	u16	rx_maxlen;	/* 2U, NU */
615 };
616 
617 struct gbe_host_port_regs {
618 	u32	src_id;
619 	u32	port_vlan;
620 	u32	rx_pri_map;
621 	u32	rx_maxlen;
622 };
623 
624 struct gbe_host_port_regs_ofs {
625 	u16	port_vlan;
626 	u16	tx_pri_map;
627 	u16	rx_maxlen;
628 };
629 
630 struct gbe_emac_regs {
631 	u32	id_ver;
632 	u32	mac_control;
633 	u32	mac_status;
634 	u32	soft_reset;
635 	u32	rx_maxlen;
636 	u32	__reserved_0;
637 	u32	rx_pause;
638 	u32	tx_pause;
639 	u32	__reserved_1;
640 	u32	rx_pri_map;
641 	u32	rsvd[6];
642 };
643 
644 struct gbe_emac_regs_ofs {
645 	u16	mac_control;
646 	u16	soft_reset;
647 	u16	rx_maxlen;
648 };
649 
650 struct gbe_hw_stats {
651 	u32	rx_good_frames;
652 	u32	rx_broadcast_frames;
653 	u32	rx_multicast_frames;
654 	u32	rx_pause_frames;
655 	u32	rx_crc_errors;
656 	u32	rx_align_code_errors;
657 	u32	rx_oversized_frames;
658 	u32	rx_jabber_frames;
659 	u32	rx_undersized_frames;
660 	u32	rx_fragments;
661 	u32	__pad_0[2];
662 	u32	rx_bytes;
663 	u32	tx_good_frames;
664 	u32	tx_broadcast_frames;
665 	u32	tx_multicast_frames;
666 	u32	tx_pause_frames;
667 	u32	tx_deferred_frames;
668 	u32	tx_collision_frames;
669 	u32	tx_single_coll_frames;
670 	u32	tx_mult_coll_frames;
671 	u32	tx_excessive_collisions;
672 	u32	tx_late_collisions;
673 	u32	tx_underrun;
674 	u32	tx_carrier_sense_errors;
675 	u32	tx_bytes;
676 	u32	tx_64byte_frames;
677 	u32	tx_65_to_127byte_frames;
678 	u32	tx_128_to_255byte_frames;
679 	u32	tx_256_to_511byte_frames;
680 	u32	tx_512_to_1023byte_frames;
681 	u32	tx_1024byte_frames;
682 	u32	net_bytes;
683 	u32	rx_sof_overruns;
684 	u32	rx_mof_overruns;
685 	u32	rx_dma_overruns;
686 };
687 
688 #define GBE_MAX_HW_STAT_MODS			9
689 #define GBE_HW_STATS_REG_MAP_SZ			0x100
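
/* Each statistics module occupies one GBE_HW_STATS_REG_MAP_SZ-sized register
 * window.  As a sketch (the actual setup lives in the probe code later in
 * this file), the per-module bases can be derived as
 *
 *	hw_stats_regs[i] = stats_base + GBE_HW_STATS_REG_MAP_SZ * i;
 *
 * for up to GBE_MAX_HW_STAT_MODS modules.
 */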
690 
691 struct ts_ctl {
692 	int	uni;
693 	u8	dst_port_map;
694 	u8	maddr_map;
695 	u8	ts_mcast_type;
696 };
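
/* Assumed mapping of the ts_ctl fields above onto the Px_TS_CTL/LTYPE2 bit
 * groups (see the TS_* defines earlier in this file): uni selects TS_UNI_EN,
 * dst_port_map is shifted by TS_CTL_DST_PORT_SHIFT and maddr_map by
 * TS_CTL_MADDR_SHIFT; ts_mcast_type selects the PTP multicast address type.
 * The authoritative usage is in the timestamping handlers further down.
 */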
697 
698 struct gbe_slave {
699 	void __iomem			*port_regs;
700 	void __iomem			*emac_regs;
701 	struct gbe_port_regs_ofs	port_regs_ofs;
702 	struct gbe_emac_regs_ofs	emac_regs_ofs;
703 	int				slave_num; /* 0-based logical number */
704 	int				port_num;  /* actual port number */
705 	atomic_t			link_state;
706 	bool				open;
707 	struct phy_device		*phy;
708 	u32				link_interface;
709 	u32				mac_control;
710 	u8				phy_port_t;
711 	struct device_node		*node;
712 	struct device_node		*phy_node;
713 	struct ts_ctl			ts_ctl;
714 	struct list_head		slave_list;
715 };
716 
717 struct gbe_priv {
718 	struct device			*dev;
719 	struct netcp_device		*netcp_device;
720 	struct timer_list		timer;
721 	u32				num_slaves;
722 	u32				ale_entries;
723 	u32				ale_ports;
724 	bool				enable_ale;
725 	u8				max_num_slaves;
726 	u8				max_num_ports; /* max_num_slaves + 1 */
727 	u8				num_stats_mods;
728 	struct netcp_tx_pipe		tx_pipe;
729 
730 	int				host_port;
731 	u32				rx_packet_max;
732 	u32				ss_version;
733 	u32				stats_en_mask;
734 
735 	void __iomem			*ss_regs;
736 	void __iomem			*switch_regs;
737 	void __iomem			*host_port_regs;
738 	void __iomem			*ale_reg;
739 	void __iomem			*cpts_reg;
740 	void __iomem			*sgmii_port_regs;
741 	void __iomem			*sgmii_port34_regs;
742 	void __iomem			*xgbe_serdes_regs;
743 	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
744 
745 	struct gbe_ss_regs_ofs		ss_regs_ofs;
746 	struct gbe_switch_regs_ofs	switch_regs_ofs;
747 	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
748 
749 	struct cpsw_ale			*ale;
750 	unsigned int			tx_queue_id;
751 	const char			*dma_chan_name;
752 
753 	struct list_head		gbe_intf_head;
754 	struct list_head		secondary_slaves;
755 	struct net_device		*dummy_ndev;
756 
757 	u64				*hw_stats;
758 	u32				*hw_stats_prev;
759 	const struct netcp_ethtool_stat *et_stats;
760 	int				num_et_stats;
761 	/* Lock for updating the hw stats */
762 	spinlock_t			hw_stats_lock;
763 
764 	int				cpts_registered;
765 	struct cpts			*cpts;
766 };
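
/* Sketch of the bookkeeping implied by hw_stats/hw_stats_prev (the real
 * update code is later in this file): the 32-bit hardware counters wrap, so
 * each sample is folded into the wider software counter under
 * hw_stats_lock, roughly
 *
 *	u32 val = readl(reg);
 *
 *	hw_stats[i] += val - hw_stats_prev[i];
 *	hw_stats_prev[i] = val;
 */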
767 
768 struct gbe_intf {
769 	struct net_device	*ndev;
770 	struct device		*dev;
771 	struct gbe_priv		*gbe_dev;
772 	struct netcp_tx_pipe	tx_pipe;
773 	struct gbe_slave	*slave;
774 	struct list_head	gbe_intf_list;
775 	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
776 };
777 
778 static struct netcp_module gbe_module;
779 static struct netcp_module xgbe_module;
780 
781 /* Statistic management */
782 struct netcp_ethtool_stat {
783 	char desc[ETH_GSTRING_LEN];
784 	int type;
785 	u32 size;
786 	int offset;
787 };
788 
789 #define GBE_STATSA_INFO(field)						\
790 {									\
791 	"GBE_A:"#field, GBE_STATSA_MODULE,				\
792 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
793 	offsetof(struct gbe_hw_stats, field)				\
794 }
795 
796 #define GBE_STATSB_INFO(field)						\
797 {									\
798 	"GBE_B:"#field, GBE_STATSB_MODULE,				\
799 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
800 	offsetof(struct gbe_hw_stats, field)				\
801 }
802 
803 #define GBE_STATSC_INFO(field)						\
804 {									\
805 	"GBE_C:"#field, GBE_STATSC_MODULE,				\
806 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
807 	offsetof(struct gbe_hw_stats, field)				\
808 }
809 
810 #define GBE_STATSD_INFO(field)						\
811 {									\
812 	"GBE_D:"#field, GBE_STATSD_MODULE,				\
813 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
814 	offsetof(struct gbe_hw_stats, field)				\
815 }
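
/* Each *_INFO() macro above produces one table entry; for example
 * GBE_STATSA_INFO(rx_good_frames) expands to
 *
 *	{ "GBE_A:rx_good_frames", GBE_STATSA_MODULE,
 *	  FIELD_SIZEOF(struct gbe_hw_stats, rx_good_frames),
 *	  offsetof(struct gbe_hw_stats, rx_good_frames) }
 *
 * i.e. the ethtool string, the stats module the counter belongs to, and the
 * size/offset used to locate it within that module's register block.
 */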
816 
817 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
818 	/* GBE module A */
819 	GBE_STATSA_INFO(rx_good_frames),
820 	GBE_STATSA_INFO(rx_broadcast_frames),
821 	GBE_STATSA_INFO(rx_multicast_frames),
822 	GBE_STATSA_INFO(rx_pause_frames),
823 	GBE_STATSA_INFO(rx_crc_errors),
824 	GBE_STATSA_INFO(rx_align_code_errors),
825 	GBE_STATSA_INFO(rx_oversized_frames),
826 	GBE_STATSA_INFO(rx_jabber_frames),
827 	GBE_STATSA_INFO(rx_undersized_frames),
828 	GBE_STATSA_INFO(rx_fragments),
829 	GBE_STATSA_INFO(rx_bytes),
830 	GBE_STATSA_INFO(tx_good_frames),
831 	GBE_STATSA_INFO(tx_broadcast_frames),
832 	GBE_STATSA_INFO(tx_multicast_frames),
833 	GBE_STATSA_INFO(tx_pause_frames),
834 	GBE_STATSA_INFO(tx_deferred_frames),
835 	GBE_STATSA_INFO(tx_collision_frames),
836 	GBE_STATSA_INFO(tx_single_coll_frames),
837 	GBE_STATSA_INFO(tx_mult_coll_frames),
838 	GBE_STATSA_INFO(tx_excessive_collisions),
839 	GBE_STATSA_INFO(tx_late_collisions),
840 	GBE_STATSA_INFO(tx_underrun),
841 	GBE_STATSA_INFO(tx_carrier_sense_errors),
842 	GBE_STATSA_INFO(tx_bytes),
843 	GBE_STATSA_INFO(tx_64byte_frames),
844 	GBE_STATSA_INFO(tx_65_to_127byte_frames),
845 	GBE_STATSA_INFO(tx_128_to_255byte_frames),
846 	GBE_STATSA_INFO(tx_256_to_511byte_frames),
847 	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
848 	GBE_STATSA_INFO(tx_1024byte_frames),
849 	GBE_STATSA_INFO(net_bytes),
850 	GBE_STATSA_INFO(rx_sof_overruns),
851 	GBE_STATSA_INFO(rx_mof_overruns),
852 	GBE_STATSA_INFO(rx_dma_overruns),
853 	/* GBE module B */
854 	GBE_STATSB_INFO(rx_good_frames),
855 	GBE_STATSB_INFO(rx_broadcast_frames),
856 	GBE_STATSB_INFO(rx_multicast_frames),
857 	GBE_STATSB_INFO(rx_pause_frames),
858 	GBE_STATSB_INFO(rx_crc_errors),
859 	GBE_STATSB_INFO(rx_align_code_errors),
860 	GBE_STATSB_INFO(rx_oversized_frames),
861 	GBE_STATSB_INFO(rx_jabber_frames),
862 	GBE_STATSB_INFO(rx_undersized_frames),
863 	GBE_STATSB_INFO(rx_fragments),
864 	GBE_STATSB_INFO(rx_bytes),
865 	GBE_STATSB_INFO(tx_good_frames),
866 	GBE_STATSB_INFO(tx_broadcast_frames),
867 	GBE_STATSB_INFO(tx_multicast_frames),
868 	GBE_STATSB_INFO(tx_pause_frames),
869 	GBE_STATSB_INFO(tx_deferred_frames),
870 	GBE_STATSB_INFO(tx_collision_frames),
871 	GBE_STATSB_INFO(tx_single_coll_frames),
872 	GBE_STATSB_INFO(tx_mult_coll_frames),
873 	GBE_STATSB_INFO(tx_excessive_collisions),
874 	GBE_STATSB_INFO(tx_late_collisions),
875 	GBE_STATSB_INFO(tx_underrun),
876 	GBE_STATSB_INFO(tx_carrier_sense_errors),
877 	GBE_STATSB_INFO(tx_bytes),
878 	GBE_STATSB_INFO(tx_64byte_frames),
879 	GBE_STATSB_INFO(tx_65_to_127byte_frames),
880 	GBE_STATSB_INFO(tx_128_to_255byte_frames),
881 	GBE_STATSB_INFO(tx_256_to_511byte_frames),
882 	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
883 	GBE_STATSB_INFO(tx_1024byte_frames),
884 	GBE_STATSB_INFO(net_bytes),
885 	GBE_STATSB_INFO(rx_sof_overruns),
886 	GBE_STATSB_INFO(rx_mof_overruns),
887 	GBE_STATSB_INFO(rx_dma_overruns),
888 	/* GBE module C */
889 	GBE_STATSC_INFO(rx_good_frames),
890 	GBE_STATSC_INFO(rx_broadcast_frames),
891 	GBE_STATSC_INFO(rx_multicast_frames),
892 	GBE_STATSC_INFO(rx_pause_frames),
893 	GBE_STATSC_INFO(rx_crc_errors),
894 	GBE_STATSC_INFO(rx_align_code_errors),
895 	GBE_STATSC_INFO(rx_oversized_frames),
896 	GBE_STATSC_INFO(rx_jabber_frames),
897 	GBE_STATSC_INFO(rx_undersized_frames),
898 	GBE_STATSC_INFO(rx_fragments),
899 	GBE_STATSC_INFO(rx_bytes),
900 	GBE_STATSC_INFO(tx_good_frames),
901 	GBE_STATSC_INFO(tx_broadcast_frames),
902 	GBE_STATSC_INFO(tx_multicast_frames),
903 	GBE_STATSC_INFO(tx_pause_frames),
904 	GBE_STATSC_INFO(tx_deferred_frames),
905 	GBE_STATSC_INFO(tx_collision_frames),
906 	GBE_STATSC_INFO(tx_single_coll_frames),
907 	GBE_STATSC_INFO(tx_mult_coll_frames),
908 	GBE_STATSC_INFO(tx_excessive_collisions),
909 	GBE_STATSC_INFO(tx_late_collisions),
910 	GBE_STATSC_INFO(tx_underrun),
911 	GBE_STATSC_INFO(tx_carrier_sense_errors),
912 	GBE_STATSC_INFO(tx_bytes),
913 	GBE_STATSC_INFO(tx_64byte_frames),
914 	GBE_STATSC_INFO(tx_65_to_127byte_frames),
915 	GBE_STATSC_INFO(tx_128_to_255byte_frames),
916 	GBE_STATSC_INFO(tx_256_to_511byte_frames),
917 	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
918 	GBE_STATSC_INFO(tx_1024byte_frames),
919 	GBE_STATSC_INFO(net_bytes),
920 	GBE_STATSC_INFO(rx_sof_overruns),
921 	GBE_STATSC_INFO(rx_mof_overruns),
922 	GBE_STATSC_INFO(rx_dma_overruns),
923 	/* GBE module D */
924 	GBE_STATSD_INFO(rx_good_frames),
925 	GBE_STATSD_INFO(rx_broadcast_frames),
926 	GBE_STATSD_INFO(rx_multicast_frames),
927 	GBE_STATSD_INFO(rx_pause_frames),
928 	GBE_STATSD_INFO(rx_crc_errors),
929 	GBE_STATSD_INFO(rx_align_code_errors),
930 	GBE_STATSD_INFO(rx_oversized_frames),
931 	GBE_STATSD_INFO(rx_jabber_frames),
932 	GBE_STATSD_INFO(rx_undersized_frames),
933 	GBE_STATSD_INFO(rx_fragments),
934 	GBE_STATSD_INFO(rx_bytes),
935 	GBE_STATSD_INFO(tx_good_frames),
936 	GBE_STATSD_INFO(tx_broadcast_frames),
937 	GBE_STATSD_INFO(tx_multicast_frames),
938 	GBE_STATSD_INFO(tx_pause_frames),
939 	GBE_STATSD_INFO(tx_deferred_frames),
940 	GBE_STATSD_INFO(tx_collision_frames),
941 	GBE_STATSD_INFO(tx_single_coll_frames),
942 	GBE_STATSD_INFO(tx_mult_coll_frames),
943 	GBE_STATSD_INFO(tx_excessive_collisions),
944 	GBE_STATSD_INFO(tx_late_collisions),
945 	GBE_STATSD_INFO(tx_underrun),
946 	GBE_STATSD_INFO(tx_carrier_sense_errors),
947 	GBE_STATSD_INFO(tx_bytes),
948 	GBE_STATSD_INFO(tx_64byte_frames),
949 	GBE_STATSD_INFO(tx_65_to_127byte_frames),
950 	GBE_STATSD_INFO(tx_128_to_255byte_frames),
951 	GBE_STATSD_INFO(tx_256_to_511byte_frames),
952 	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
953 	GBE_STATSD_INFO(tx_1024byte_frames),
954 	GBE_STATSD_INFO(net_bytes),
955 	GBE_STATSD_INFO(rx_sof_overruns),
956 	GBE_STATSD_INFO(rx_mof_overruns),
957 	GBE_STATSD_INFO(rx_dma_overruns),
958 };
959 
960 /* Number of GBENU_STATS_HOST() entries below (per host stats module) */
961 #define GBENU_ET_STATS_HOST_SIZE	52
962 
963 #define GBENU_STATS_HOST(field)					\
964 {								\
965 	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
966 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
967 	offsetof(struct gbenu_hw_stats, field)			\
968 }
969 
970 /* Number of GBENU_STATS_Px() entries below (per slave port stats module) */
971 #define GBENU_ET_STATS_PORT_SIZE	65
972 
973 #define GBENU_STATS_P1(field)					\
974 {								\
975 	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
976 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
977 	offsetof(struct gbenu_hw_stats, field)			\
978 }
979 
980 #define GBENU_STATS_P2(field)					\
981 {								\
982 	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
983 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
984 	offsetof(struct gbenu_hw_stats, field)			\
985 }
986 
987 #define GBENU_STATS_P3(field)					\
988 {								\
989 	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
990 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
991 	offsetof(struct gbenu_hw_stats, field)			\
992 }
993 
994 #define GBENU_STATS_P4(field)					\
995 {								\
996 	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
997 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
998 	offsetof(struct gbenu_hw_stats, field)			\
999 }
1000 
1001 #define GBENU_STATS_P5(field)					\
1002 {								\
1003 	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
1004 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1005 	offsetof(struct gbenu_hw_stats, field)			\
1006 }
1007 
1008 #define GBENU_STATS_P6(field)					\
1009 {								\
1010 	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
1011 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1012 	offsetof(struct gbenu_hw_stats, field)			\
1013 }
1014 
1015 #define GBENU_STATS_P7(field)					\
1016 {								\
1017 	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
1018 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1019 	offsetof(struct gbenu_hw_stats, field)			\
1020 }
1021 
1022 #define GBENU_STATS_P8(field)					\
1023 {								\
1024 	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
1025 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1026 	offsetof(struct gbenu_hw_stats, field)			\
1027 }
1028 
1029 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1030 	/* GBENU Host Module */
1031 	GBENU_STATS_HOST(rx_good_frames),
1032 	GBENU_STATS_HOST(rx_broadcast_frames),
1033 	GBENU_STATS_HOST(rx_multicast_frames),
1034 	GBENU_STATS_HOST(rx_crc_errors),
1035 	GBENU_STATS_HOST(rx_oversized_frames),
1036 	GBENU_STATS_HOST(rx_undersized_frames),
1037 	GBENU_STATS_HOST(ale_drop),
1038 	GBENU_STATS_HOST(ale_overrun_drop),
1039 	GBENU_STATS_HOST(rx_bytes),
1040 	GBENU_STATS_HOST(tx_good_frames),
1041 	GBENU_STATS_HOST(tx_broadcast_frames),
1042 	GBENU_STATS_HOST(tx_multicast_frames),
1043 	GBENU_STATS_HOST(tx_bytes),
1044 	GBENU_STATS_HOST(tx_64B_frames),
1045 	GBENU_STATS_HOST(tx_65_to_127B_frames),
1046 	GBENU_STATS_HOST(tx_128_to_255B_frames),
1047 	GBENU_STATS_HOST(tx_256_to_511B_frames),
1048 	GBENU_STATS_HOST(tx_512_to_1023B_frames),
1049 	GBENU_STATS_HOST(tx_1024B_frames),
1050 	GBENU_STATS_HOST(net_bytes),
1051 	GBENU_STATS_HOST(rx_bottom_fifo_drop),
1052 	GBENU_STATS_HOST(rx_port_mask_drop),
1053 	GBENU_STATS_HOST(rx_top_fifo_drop),
1054 	GBENU_STATS_HOST(ale_rate_limit_drop),
1055 	GBENU_STATS_HOST(ale_vid_ingress_drop),
1056 	GBENU_STATS_HOST(ale_da_eq_sa_drop),
1057 	GBENU_STATS_HOST(ale_unknown_ucast),
1058 	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1059 	GBENU_STATS_HOST(ale_unknown_mcast),
1060 	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1061 	GBENU_STATS_HOST(ale_unknown_bcast),
1062 	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1063 	GBENU_STATS_HOST(ale_pol_match),
1064 	GBENU_STATS_HOST(ale_pol_match_red),
1065 	GBENU_STATS_HOST(ale_pol_match_yellow),
1066 	GBENU_STATS_HOST(tx_mem_protect_err),
1067 	GBENU_STATS_HOST(tx_pri0_drop),
1068 	GBENU_STATS_HOST(tx_pri1_drop),
1069 	GBENU_STATS_HOST(tx_pri2_drop),
1070 	GBENU_STATS_HOST(tx_pri3_drop),
1071 	GBENU_STATS_HOST(tx_pri4_drop),
1072 	GBENU_STATS_HOST(tx_pri5_drop),
1073 	GBENU_STATS_HOST(tx_pri6_drop),
1074 	GBENU_STATS_HOST(tx_pri7_drop),
1075 	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1076 	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1077 	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1078 	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1079 	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1080 	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1081 	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1082 	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1083 	/* GBENU Module 1 */
1084 	GBENU_STATS_P1(rx_good_frames),
1085 	GBENU_STATS_P1(rx_broadcast_frames),
1086 	GBENU_STATS_P1(rx_multicast_frames),
1087 	GBENU_STATS_P1(rx_pause_frames),
1088 	GBENU_STATS_P1(rx_crc_errors),
1089 	GBENU_STATS_P1(rx_align_code_errors),
1090 	GBENU_STATS_P1(rx_oversized_frames),
1091 	GBENU_STATS_P1(rx_jabber_frames),
1092 	GBENU_STATS_P1(rx_undersized_frames),
1093 	GBENU_STATS_P1(rx_fragments),
1094 	GBENU_STATS_P1(ale_drop),
1095 	GBENU_STATS_P1(ale_overrun_drop),
1096 	GBENU_STATS_P1(rx_bytes),
1097 	GBENU_STATS_P1(tx_good_frames),
1098 	GBENU_STATS_P1(tx_broadcast_frames),
1099 	GBENU_STATS_P1(tx_multicast_frames),
1100 	GBENU_STATS_P1(tx_pause_frames),
1101 	GBENU_STATS_P1(tx_deferred_frames),
1102 	GBENU_STATS_P1(tx_collision_frames),
1103 	GBENU_STATS_P1(tx_single_coll_frames),
1104 	GBENU_STATS_P1(tx_mult_coll_frames),
1105 	GBENU_STATS_P1(tx_excessive_collisions),
1106 	GBENU_STATS_P1(tx_late_collisions),
1107 	GBENU_STATS_P1(rx_ipg_error),
1108 	GBENU_STATS_P1(tx_carrier_sense_errors),
1109 	GBENU_STATS_P1(tx_bytes),
1110 	GBENU_STATS_P1(tx_64B_frames),
1111 	GBENU_STATS_P1(tx_65_to_127B_frames),
1112 	GBENU_STATS_P1(tx_128_to_255B_frames),
1113 	GBENU_STATS_P1(tx_256_to_511B_frames),
1114 	GBENU_STATS_P1(tx_512_to_1023B_frames),
1115 	GBENU_STATS_P1(tx_1024B_frames),
1116 	GBENU_STATS_P1(net_bytes),
1117 	GBENU_STATS_P1(rx_bottom_fifo_drop),
1118 	GBENU_STATS_P1(rx_port_mask_drop),
1119 	GBENU_STATS_P1(rx_top_fifo_drop),
1120 	GBENU_STATS_P1(ale_rate_limit_drop),
1121 	GBENU_STATS_P1(ale_vid_ingress_drop),
1122 	GBENU_STATS_P1(ale_da_eq_sa_drop),
1123 	GBENU_STATS_P1(ale_unknown_ucast),
1124 	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1125 	GBENU_STATS_P1(ale_unknown_mcast),
1126 	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1127 	GBENU_STATS_P1(ale_unknown_bcast),
1128 	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1129 	GBENU_STATS_P1(ale_pol_match),
1130 	GBENU_STATS_P1(ale_pol_match_red),
1131 	GBENU_STATS_P1(ale_pol_match_yellow),
1132 	GBENU_STATS_P1(tx_mem_protect_err),
1133 	GBENU_STATS_P1(tx_pri0_drop),
1134 	GBENU_STATS_P1(tx_pri1_drop),
1135 	GBENU_STATS_P1(tx_pri2_drop),
1136 	GBENU_STATS_P1(tx_pri3_drop),
1137 	GBENU_STATS_P1(tx_pri4_drop),
1138 	GBENU_STATS_P1(tx_pri5_drop),
1139 	GBENU_STATS_P1(tx_pri6_drop),
1140 	GBENU_STATS_P1(tx_pri7_drop),
1141 	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1142 	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1143 	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1144 	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1145 	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1146 	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1147 	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1148 	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1149 	/* GBENU Module 2 */
1150 	GBENU_STATS_P2(rx_good_frames),
1151 	GBENU_STATS_P2(rx_broadcast_frames),
1152 	GBENU_STATS_P2(rx_multicast_frames),
1153 	GBENU_STATS_P2(rx_pause_frames),
1154 	GBENU_STATS_P2(rx_crc_errors),
1155 	GBENU_STATS_P2(rx_align_code_errors),
1156 	GBENU_STATS_P2(rx_oversized_frames),
1157 	GBENU_STATS_P2(rx_jabber_frames),
1158 	GBENU_STATS_P2(rx_undersized_frames),
1159 	GBENU_STATS_P2(rx_fragments),
1160 	GBENU_STATS_P2(ale_drop),
1161 	GBENU_STATS_P2(ale_overrun_drop),
1162 	GBENU_STATS_P2(rx_bytes),
1163 	GBENU_STATS_P2(tx_good_frames),
1164 	GBENU_STATS_P2(tx_broadcast_frames),
1165 	GBENU_STATS_P2(tx_multicast_frames),
1166 	GBENU_STATS_P2(tx_pause_frames),
1167 	GBENU_STATS_P2(tx_deferred_frames),
1168 	GBENU_STATS_P2(tx_collision_frames),
1169 	GBENU_STATS_P2(tx_single_coll_frames),
1170 	GBENU_STATS_P2(tx_mult_coll_frames),
1171 	GBENU_STATS_P2(tx_excessive_collisions),
1172 	GBENU_STATS_P2(tx_late_collisions),
1173 	GBENU_STATS_P2(rx_ipg_error),
1174 	GBENU_STATS_P2(tx_carrier_sense_errors),
1175 	GBENU_STATS_P2(tx_bytes),
1176 	GBENU_STATS_P2(tx_64B_frames),
1177 	GBENU_STATS_P2(tx_65_to_127B_frames),
1178 	GBENU_STATS_P2(tx_128_to_255B_frames),
1179 	GBENU_STATS_P2(tx_256_to_511B_frames),
1180 	GBENU_STATS_P2(tx_512_to_1023B_frames),
1181 	GBENU_STATS_P2(tx_1024B_frames),
1182 	GBENU_STATS_P2(net_bytes),
1183 	GBENU_STATS_P2(rx_bottom_fifo_drop),
1184 	GBENU_STATS_P2(rx_port_mask_drop),
1185 	GBENU_STATS_P2(rx_top_fifo_drop),
1186 	GBENU_STATS_P2(ale_rate_limit_drop),
1187 	GBENU_STATS_P2(ale_vid_ingress_drop),
1188 	GBENU_STATS_P2(ale_da_eq_sa_drop),
1189 	GBENU_STATS_P2(ale_unknown_ucast),
1190 	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1191 	GBENU_STATS_P2(ale_unknown_mcast),
1192 	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1193 	GBENU_STATS_P2(ale_unknown_bcast),
1194 	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1195 	GBENU_STATS_P2(ale_pol_match),
1196 	GBENU_STATS_P2(ale_pol_match_red),
1197 	GBENU_STATS_P2(ale_pol_match_yellow),
1198 	GBENU_STATS_P2(tx_mem_protect_err),
1199 	GBENU_STATS_P2(tx_pri0_drop),
1200 	GBENU_STATS_P2(tx_pri1_drop),
1201 	GBENU_STATS_P2(tx_pri2_drop),
1202 	GBENU_STATS_P2(tx_pri3_drop),
1203 	GBENU_STATS_P2(tx_pri4_drop),
1204 	GBENU_STATS_P2(tx_pri5_drop),
1205 	GBENU_STATS_P2(tx_pri6_drop),
1206 	GBENU_STATS_P2(tx_pri7_drop),
1207 	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1208 	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1209 	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1210 	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1211 	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1212 	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1213 	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1214 	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1215 	/* GBENU Module 3 */
1216 	GBENU_STATS_P3(rx_good_frames),
1217 	GBENU_STATS_P3(rx_broadcast_frames),
1218 	GBENU_STATS_P3(rx_multicast_frames),
1219 	GBENU_STATS_P3(rx_pause_frames),
1220 	GBENU_STATS_P3(rx_crc_errors),
1221 	GBENU_STATS_P3(rx_align_code_errors),
1222 	GBENU_STATS_P3(rx_oversized_frames),
1223 	GBENU_STATS_P3(rx_jabber_frames),
1224 	GBENU_STATS_P3(rx_undersized_frames),
1225 	GBENU_STATS_P3(rx_fragments),
1226 	GBENU_STATS_P3(ale_drop),
1227 	GBENU_STATS_P3(ale_overrun_drop),
1228 	GBENU_STATS_P3(rx_bytes),
1229 	GBENU_STATS_P3(tx_good_frames),
1230 	GBENU_STATS_P3(tx_broadcast_frames),
1231 	GBENU_STATS_P3(tx_multicast_frames),
1232 	GBENU_STATS_P3(tx_pause_frames),
1233 	GBENU_STATS_P3(tx_deferred_frames),
1234 	GBENU_STATS_P3(tx_collision_frames),
1235 	GBENU_STATS_P3(tx_single_coll_frames),
1236 	GBENU_STATS_P3(tx_mult_coll_frames),
1237 	GBENU_STATS_P3(tx_excessive_collisions),
1238 	GBENU_STATS_P3(tx_late_collisions),
1239 	GBENU_STATS_P3(rx_ipg_error),
1240 	GBENU_STATS_P3(tx_carrier_sense_errors),
1241 	GBENU_STATS_P3(tx_bytes),
1242 	GBENU_STATS_P3(tx_64B_frames),
1243 	GBENU_STATS_P3(tx_65_to_127B_frames),
1244 	GBENU_STATS_P3(tx_128_to_255B_frames),
1245 	GBENU_STATS_P3(tx_256_to_511B_frames),
1246 	GBENU_STATS_P3(tx_512_to_1023B_frames),
1247 	GBENU_STATS_P3(tx_1024B_frames),
1248 	GBENU_STATS_P3(net_bytes),
1249 	GBENU_STATS_P3(rx_bottom_fifo_drop),
1250 	GBENU_STATS_P3(rx_port_mask_drop),
1251 	GBENU_STATS_P3(rx_top_fifo_drop),
1252 	GBENU_STATS_P3(ale_rate_limit_drop),
1253 	GBENU_STATS_P3(ale_vid_ingress_drop),
1254 	GBENU_STATS_P3(ale_da_eq_sa_drop),
1255 	GBENU_STATS_P3(ale_unknown_ucast),
1256 	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1257 	GBENU_STATS_P3(ale_unknown_mcast),
1258 	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1259 	GBENU_STATS_P3(ale_unknown_bcast),
1260 	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1261 	GBENU_STATS_P3(ale_pol_match),
1262 	GBENU_STATS_P3(ale_pol_match_red),
1263 	GBENU_STATS_P3(ale_pol_match_yellow),
1264 	GBENU_STATS_P3(tx_mem_protect_err),
1265 	GBENU_STATS_P3(tx_pri0_drop),
1266 	GBENU_STATS_P3(tx_pri1_drop),
1267 	GBENU_STATS_P3(tx_pri2_drop),
1268 	GBENU_STATS_P3(tx_pri3_drop),
1269 	GBENU_STATS_P3(tx_pri4_drop),
1270 	GBENU_STATS_P3(tx_pri5_drop),
1271 	GBENU_STATS_P3(tx_pri6_drop),
1272 	GBENU_STATS_P3(tx_pri7_drop),
1273 	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1274 	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1275 	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1276 	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1277 	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1278 	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1279 	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1280 	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1281 	/* GBENU Module 4 */
1282 	GBENU_STATS_P4(rx_good_frames),
1283 	GBENU_STATS_P4(rx_broadcast_frames),
1284 	GBENU_STATS_P4(rx_multicast_frames),
1285 	GBENU_STATS_P4(rx_pause_frames),
1286 	GBENU_STATS_P4(rx_crc_errors),
1287 	GBENU_STATS_P4(rx_align_code_errors),
1288 	GBENU_STATS_P4(rx_oversized_frames),
1289 	GBENU_STATS_P4(rx_jabber_frames),
1290 	GBENU_STATS_P4(rx_undersized_frames),
1291 	GBENU_STATS_P4(rx_fragments),
1292 	GBENU_STATS_P4(ale_drop),
1293 	GBENU_STATS_P4(ale_overrun_drop),
1294 	GBENU_STATS_P4(rx_bytes),
1295 	GBENU_STATS_P4(tx_good_frames),
1296 	GBENU_STATS_P4(tx_broadcast_frames),
1297 	GBENU_STATS_P4(tx_multicast_frames),
1298 	GBENU_STATS_P4(tx_pause_frames),
1299 	GBENU_STATS_P4(tx_deferred_frames),
1300 	GBENU_STATS_P4(tx_collision_frames),
1301 	GBENU_STATS_P4(tx_single_coll_frames),
1302 	GBENU_STATS_P4(tx_mult_coll_frames),
1303 	GBENU_STATS_P4(tx_excessive_collisions),
1304 	GBENU_STATS_P4(tx_late_collisions),
1305 	GBENU_STATS_P4(rx_ipg_error),
1306 	GBENU_STATS_P4(tx_carrier_sense_errors),
1307 	GBENU_STATS_P4(tx_bytes),
1308 	GBENU_STATS_P4(tx_64B_frames),
1309 	GBENU_STATS_P4(tx_65_to_127B_frames),
1310 	GBENU_STATS_P4(tx_128_to_255B_frames),
1311 	GBENU_STATS_P4(tx_256_to_511B_frames),
1312 	GBENU_STATS_P4(tx_512_to_1023B_frames),
1313 	GBENU_STATS_P4(tx_1024B_frames),
1314 	GBENU_STATS_P4(net_bytes),
1315 	GBENU_STATS_P4(rx_bottom_fifo_drop),
1316 	GBENU_STATS_P4(rx_port_mask_drop),
1317 	GBENU_STATS_P4(rx_top_fifo_drop),
1318 	GBENU_STATS_P4(ale_rate_limit_drop),
1319 	GBENU_STATS_P4(ale_vid_ingress_drop),
1320 	GBENU_STATS_P4(ale_da_eq_sa_drop),
1321 	GBENU_STATS_P4(ale_unknown_ucast),
1322 	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1323 	GBENU_STATS_P4(ale_unknown_mcast),
1324 	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1325 	GBENU_STATS_P4(ale_unknown_bcast),
1326 	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1327 	GBENU_STATS_P4(ale_pol_match),
1328 	GBENU_STATS_P4(ale_pol_match_red),
1329 	GBENU_STATS_P4(ale_pol_match_yellow),
1330 	GBENU_STATS_P4(tx_mem_protect_err),
1331 	GBENU_STATS_P4(tx_pri0_drop),
1332 	GBENU_STATS_P4(tx_pri1_drop),
1333 	GBENU_STATS_P4(tx_pri2_drop),
1334 	GBENU_STATS_P4(tx_pri3_drop),
1335 	GBENU_STATS_P4(tx_pri4_drop),
1336 	GBENU_STATS_P4(tx_pri5_drop),
1337 	GBENU_STATS_P4(tx_pri6_drop),
1338 	GBENU_STATS_P4(tx_pri7_drop),
1339 	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1340 	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1341 	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1342 	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1343 	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1344 	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1345 	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1346 	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1347 	/* GBENU Module 5 */
1348 	GBENU_STATS_P5(rx_good_frames),
1349 	GBENU_STATS_P5(rx_broadcast_frames),
1350 	GBENU_STATS_P5(rx_multicast_frames),
1351 	GBENU_STATS_P5(rx_pause_frames),
1352 	GBENU_STATS_P5(rx_crc_errors),
1353 	GBENU_STATS_P5(rx_align_code_errors),
1354 	GBENU_STATS_P5(rx_oversized_frames),
1355 	GBENU_STATS_P5(rx_jabber_frames),
1356 	GBENU_STATS_P5(rx_undersized_frames),
1357 	GBENU_STATS_P5(rx_fragments),
1358 	GBENU_STATS_P5(ale_drop),
1359 	GBENU_STATS_P5(ale_overrun_drop),
1360 	GBENU_STATS_P5(rx_bytes),
1361 	GBENU_STATS_P5(tx_good_frames),
1362 	GBENU_STATS_P5(tx_broadcast_frames),
1363 	GBENU_STATS_P5(tx_multicast_frames),
1364 	GBENU_STATS_P5(tx_pause_frames),
1365 	GBENU_STATS_P5(tx_deferred_frames),
1366 	GBENU_STATS_P5(tx_collision_frames),
1367 	GBENU_STATS_P5(tx_single_coll_frames),
1368 	GBENU_STATS_P5(tx_mult_coll_frames),
1369 	GBENU_STATS_P5(tx_excessive_collisions),
1370 	GBENU_STATS_P5(tx_late_collisions),
1371 	GBENU_STATS_P5(rx_ipg_error),
1372 	GBENU_STATS_P5(tx_carrier_sense_errors),
1373 	GBENU_STATS_P5(tx_bytes),
1374 	GBENU_STATS_P5(tx_64B_frames),
1375 	GBENU_STATS_P5(tx_65_to_127B_frames),
1376 	GBENU_STATS_P5(tx_128_to_255B_frames),
1377 	GBENU_STATS_P5(tx_256_to_511B_frames),
1378 	GBENU_STATS_P5(tx_512_to_1023B_frames),
1379 	GBENU_STATS_P5(tx_1024B_frames),
1380 	GBENU_STATS_P5(net_bytes),
1381 	GBENU_STATS_P5(rx_bottom_fifo_drop),
1382 	GBENU_STATS_P5(rx_port_mask_drop),
1383 	GBENU_STATS_P5(rx_top_fifo_drop),
1384 	GBENU_STATS_P5(ale_rate_limit_drop),
1385 	GBENU_STATS_P5(ale_vid_ingress_drop),
1386 	GBENU_STATS_P5(ale_da_eq_sa_drop),
1387 	GBENU_STATS_P5(ale_unknown_ucast),
1388 	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1389 	GBENU_STATS_P5(ale_unknown_mcast),
1390 	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1391 	GBENU_STATS_P5(ale_unknown_bcast),
1392 	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1393 	GBENU_STATS_P5(ale_pol_match),
1394 	GBENU_STATS_P5(ale_pol_match_red),
1395 	GBENU_STATS_P5(ale_pol_match_yellow),
1396 	GBENU_STATS_P5(tx_mem_protect_err),
1397 	GBENU_STATS_P5(tx_pri0_drop),
1398 	GBENU_STATS_P5(tx_pri1_drop),
1399 	GBENU_STATS_P5(tx_pri2_drop),
1400 	GBENU_STATS_P5(tx_pri3_drop),
1401 	GBENU_STATS_P5(tx_pri4_drop),
1402 	GBENU_STATS_P5(tx_pri5_drop),
1403 	GBENU_STATS_P5(tx_pri6_drop),
1404 	GBENU_STATS_P5(tx_pri7_drop),
1405 	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1406 	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1407 	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1408 	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1409 	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1410 	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1411 	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1412 	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1413 	/* GBENU Module 6 */
1414 	GBENU_STATS_P6(rx_good_frames),
1415 	GBENU_STATS_P6(rx_broadcast_frames),
1416 	GBENU_STATS_P6(rx_multicast_frames),
1417 	GBENU_STATS_P6(rx_pause_frames),
1418 	GBENU_STATS_P6(rx_crc_errors),
1419 	GBENU_STATS_P6(rx_align_code_errors),
1420 	GBENU_STATS_P6(rx_oversized_frames),
1421 	GBENU_STATS_P6(rx_jabber_frames),
1422 	GBENU_STATS_P6(rx_undersized_frames),
1423 	GBENU_STATS_P6(rx_fragments),
1424 	GBENU_STATS_P6(ale_drop),
1425 	GBENU_STATS_P6(ale_overrun_drop),
1426 	GBENU_STATS_P6(rx_bytes),
1427 	GBENU_STATS_P6(tx_good_frames),
1428 	GBENU_STATS_P6(tx_broadcast_frames),
1429 	GBENU_STATS_P6(tx_multicast_frames),
1430 	GBENU_STATS_P6(tx_pause_frames),
1431 	GBENU_STATS_P6(tx_deferred_frames),
1432 	GBENU_STATS_P6(tx_collision_frames),
1433 	GBENU_STATS_P6(tx_single_coll_frames),
1434 	GBENU_STATS_P6(tx_mult_coll_frames),
1435 	GBENU_STATS_P6(tx_excessive_collisions),
1436 	GBENU_STATS_P6(tx_late_collisions),
1437 	GBENU_STATS_P6(rx_ipg_error),
1438 	GBENU_STATS_P6(tx_carrier_sense_errors),
1439 	GBENU_STATS_P6(tx_bytes),
1440 	GBENU_STATS_P6(tx_64B_frames),
1441 	GBENU_STATS_P6(tx_65_to_127B_frames),
1442 	GBENU_STATS_P6(tx_128_to_255B_frames),
1443 	GBENU_STATS_P6(tx_256_to_511B_frames),
1444 	GBENU_STATS_P6(tx_512_to_1023B_frames),
1445 	GBENU_STATS_P6(tx_1024B_frames),
1446 	GBENU_STATS_P6(net_bytes),
1447 	GBENU_STATS_P6(rx_bottom_fifo_drop),
1448 	GBENU_STATS_P6(rx_port_mask_drop),
1449 	GBENU_STATS_P6(rx_top_fifo_drop),
1450 	GBENU_STATS_P6(ale_rate_limit_drop),
1451 	GBENU_STATS_P6(ale_vid_ingress_drop),
1452 	GBENU_STATS_P6(ale_da_eq_sa_drop),
1453 	GBENU_STATS_P6(ale_unknown_ucast),
1454 	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1455 	GBENU_STATS_P6(ale_unknown_mcast),
1456 	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1457 	GBENU_STATS_P6(ale_unknown_bcast),
1458 	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1459 	GBENU_STATS_P6(ale_pol_match),
1460 	GBENU_STATS_P6(ale_pol_match_red),
1461 	GBENU_STATS_P6(ale_pol_match_yellow),
1462 	GBENU_STATS_P6(tx_mem_protect_err),
1463 	GBENU_STATS_P6(tx_pri0_drop),
1464 	GBENU_STATS_P6(tx_pri1_drop),
1465 	GBENU_STATS_P6(tx_pri2_drop),
1466 	GBENU_STATS_P6(tx_pri3_drop),
1467 	GBENU_STATS_P6(tx_pri4_drop),
1468 	GBENU_STATS_P6(tx_pri5_drop),
1469 	GBENU_STATS_P6(tx_pri6_drop),
1470 	GBENU_STATS_P6(tx_pri7_drop),
1471 	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1472 	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1473 	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1474 	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1475 	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1476 	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1477 	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1478 	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1479 	/* GBENU Module 7 */
1480 	GBENU_STATS_P7(rx_good_frames),
1481 	GBENU_STATS_P7(rx_broadcast_frames),
1482 	GBENU_STATS_P7(rx_multicast_frames),
1483 	GBENU_STATS_P7(rx_pause_frames),
1484 	GBENU_STATS_P7(rx_crc_errors),
1485 	GBENU_STATS_P7(rx_align_code_errors),
1486 	GBENU_STATS_P7(rx_oversized_frames),
1487 	GBENU_STATS_P7(rx_jabber_frames),
1488 	GBENU_STATS_P7(rx_undersized_frames),
1489 	GBENU_STATS_P7(rx_fragments),
1490 	GBENU_STATS_P7(ale_drop),
1491 	GBENU_STATS_P7(ale_overrun_drop),
1492 	GBENU_STATS_P7(rx_bytes),
1493 	GBENU_STATS_P7(tx_good_frames),
1494 	GBENU_STATS_P7(tx_broadcast_frames),
1495 	GBENU_STATS_P7(tx_multicast_frames),
1496 	GBENU_STATS_P7(tx_pause_frames),
1497 	GBENU_STATS_P7(tx_deferred_frames),
1498 	GBENU_STATS_P7(tx_collision_frames),
1499 	GBENU_STATS_P7(tx_single_coll_frames),
1500 	GBENU_STATS_P7(tx_mult_coll_frames),
1501 	GBENU_STATS_P7(tx_excessive_collisions),
1502 	GBENU_STATS_P7(tx_late_collisions),
1503 	GBENU_STATS_P7(rx_ipg_error),
1504 	GBENU_STATS_P7(tx_carrier_sense_errors),
1505 	GBENU_STATS_P7(tx_bytes),
1506 	GBENU_STATS_P7(tx_64B_frames),
1507 	GBENU_STATS_P7(tx_65_to_127B_frames),
1508 	GBENU_STATS_P7(tx_128_to_255B_frames),
1509 	GBENU_STATS_P7(tx_256_to_511B_frames),
1510 	GBENU_STATS_P7(tx_512_to_1023B_frames),
1511 	GBENU_STATS_P7(tx_1024B_frames),
1512 	GBENU_STATS_P7(net_bytes),
1513 	GBENU_STATS_P7(rx_bottom_fifo_drop),
1514 	GBENU_STATS_P7(rx_port_mask_drop),
1515 	GBENU_STATS_P7(rx_top_fifo_drop),
1516 	GBENU_STATS_P7(ale_rate_limit_drop),
1517 	GBENU_STATS_P7(ale_vid_ingress_drop),
1518 	GBENU_STATS_P7(ale_da_eq_sa_drop),
1519 	GBENU_STATS_P7(ale_unknown_ucast),
1520 	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1521 	GBENU_STATS_P7(ale_unknown_mcast),
1522 	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1523 	GBENU_STATS_P7(ale_unknown_bcast),
1524 	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1525 	GBENU_STATS_P7(ale_pol_match),
1526 	GBENU_STATS_P7(ale_pol_match_red),
1527 	GBENU_STATS_P7(ale_pol_match_yellow),
1528 	GBENU_STATS_P7(tx_mem_protect_err),
1529 	GBENU_STATS_P7(tx_pri0_drop),
1530 	GBENU_STATS_P7(tx_pri1_drop),
1531 	GBENU_STATS_P7(tx_pri2_drop),
1532 	GBENU_STATS_P7(tx_pri3_drop),
1533 	GBENU_STATS_P7(tx_pri4_drop),
1534 	GBENU_STATS_P7(tx_pri5_drop),
1535 	GBENU_STATS_P7(tx_pri6_drop),
1536 	GBENU_STATS_P7(tx_pri7_drop),
1537 	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1538 	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1539 	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1540 	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1541 	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1542 	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1543 	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1544 	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1545 	/* GBENU Module 8 */
1546 	GBENU_STATS_P8(rx_good_frames),
1547 	GBENU_STATS_P8(rx_broadcast_frames),
1548 	GBENU_STATS_P8(rx_multicast_frames),
1549 	GBENU_STATS_P8(rx_pause_frames),
1550 	GBENU_STATS_P8(rx_crc_errors),
1551 	GBENU_STATS_P8(rx_align_code_errors),
1552 	GBENU_STATS_P8(rx_oversized_frames),
1553 	GBENU_STATS_P8(rx_jabber_frames),
1554 	GBENU_STATS_P8(rx_undersized_frames),
1555 	GBENU_STATS_P8(rx_fragments),
1556 	GBENU_STATS_P8(ale_drop),
1557 	GBENU_STATS_P8(ale_overrun_drop),
1558 	GBENU_STATS_P8(rx_bytes),
1559 	GBENU_STATS_P8(tx_good_frames),
1560 	GBENU_STATS_P8(tx_broadcast_frames),
1561 	GBENU_STATS_P8(tx_multicast_frames),
1562 	GBENU_STATS_P8(tx_pause_frames),
1563 	GBENU_STATS_P8(tx_deferred_frames),
1564 	GBENU_STATS_P8(tx_collision_frames),
1565 	GBENU_STATS_P8(tx_single_coll_frames),
1566 	GBENU_STATS_P8(tx_mult_coll_frames),
1567 	GBENU_STATS_P8(tx_excessive_collisions),
1568 	GBENU_STATS_P8(tx_late_collisions),
1569 	GBENU_STATS_P8(rx_ipg_error),
1570 	GBENU_STATS_P8(tx_carrier_sense_errors),
1571 	GBENU_STATS_P8(tx_bytes),
1572 	GBENU_STATS_P8(tx_64B_frames),
1573 	GBENU_STATS_P8(tx_65_to_127B_frames),
1574 	GBENU_STATS_P8(tx_128_to_255B_frames),
1575 	GBENU_STATS_P8(tx_256_to_511B_frames),
1576 	GBENU_STATS_P8(tx_512_to_1023B_frames),
1577 	GBENU_STATS_P8(tx_1024B_frames),
1578 	GBENU_STATS_P8(net_bytes),
1579 	GBENU_STATS_P8(rx_bottom_fifo_drop),
1580 	GBENU_STATS_P8(rx_port_mask_drop),
1581 	GBENU_STATS_P8(rx_top_fifo_drop),
1582 	GBENU_STATS_P8(ale_rate_limit_drop),
1583 	GBENU_STATS_P8(ale_vid_ingress_drop),
1584 	GBENU_STATS_P8(ale_da_eq_sa_drop),
1585 	GBENU_STATS_P8(ale_unknown_ucast),
1586 	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1587 	GBENU_STATS_P8(ale_unknown_mcast),
1588 	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1589 	GBENU_STATS_P8(ale_unknown_bcast),
1590 	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1591 	GBENU_STATS_P8(ale_pol_match),
1592 	GBENU_STATS_P8(ale_pol_match_red),
1593 	GBENU_STATS_P8(ale_pol_match_yellow),
1594 	GBENU_STATS_P8(tx_mem_protect_err),
1595 	GBENU_STATS_P8(tx_pri0_drop),
1596 	GBENU_STATS_P8(tx_pri1_drop),
1597 	GBENU_STATS_P8(tx_pri2_drop),
1598 	GBENU_STATS_P8(tx_pri3_drop),
1599 	GBENU_STATS_P8(tx_pri4_drop),
1600 	GBENU_STATS_P8(tx_pri5_drop),
1601 	GBENU_STATS_P8(tx_pri6_drop),
1602 	GBENU_STATS_P8(tx_pri7_drop),
1603 	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1604 	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1605 	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1606 	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1607 	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1608 	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1609 	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1610 	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1611 };
1612 
1613 #define XGBE_STATS0_INFO(field)				\
1614 {							\
1615 	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1616 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1617 	offsetof(struct xgbe_hw_stats, field)		\
1618 }
1619 
1620 #define XGBE_STATS1_INFO(field)				\
1621 {							\
1622 	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1623 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1624 	offsetof(struct xgbe_hw_stats, field)		\
1625 }
1626 
1627 #define XGBE_STATS2_INFO(field)				\
1628 {							\
1629 	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1630 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1631 	offsetof(struct xgbe_hw_stats, field)		\
1632 }
1633 
1634 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1635 	/* GBE module 0 */
1636 	XGBE_STATS0_INFO(rx_good_frames),
1637 	XGBE_STATS0_INFO(rx_broadcast_frames),
1638 	XGBE_STATS0_INFO(rx_multicast_frames),
1639 	XGBE_STATS0_INFO(rx_oversized_frames),
1640 	XGBE_STATS0_INFO(rx_undersized_frames),
1641 	XGBE_STATS0_INFO(overrun_type4),
1642 	XGBE_STATS0_INFO(overrun_type5),
1643 	XGBE_STATS0_INFO(rx_bytes),
1644 	XGBE_STATS0_INFO(tx_good_frames),
1645 	XGBE_STATS0_INFO(tx_broadcast_frames),
1646 	XGBE_STATS0_INFO(tx_multicast_frames),
1647 	XGBE_STATS0_INFO(tx_bytes),
1648 	XGBE_STATS0_INFO(tx_64byte_frames),
1649 	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1650 	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1651 	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1652 	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1653 	XGBE_STATS0_INFO(tx_1024byte_frames),
1654 	XGBE_STATS0_INFO(net_bytes),
1655 	XGBE_STATS0_INFO(rx_sof_overruns),
1656 	XGBE_STATS0_INFO(rx_mof_overruns),
1657 	XGBE_STATS0_INFO(rx_dma_overruns),
1658 	/* XGBE module 1 */
1659 	XGBE_STATS1_INFO(rx_good_frames),
1660 	XGBE_STATS1_INFO(rx_broadcast_frames),
1661 	XGBE_STATS1_INFO(rx_multicast_frames),
1662 	XGBE_STATS1_INFO(rx_pause_frames),
1663 	XGBE_STATS1_INFO(rx_crc_errors),
1664 	XGBE_STATS1_INFO(rx_align_code_errors),
1665 	XGBE_STATS1_INFO(rx_oversized_frames),
1666 	XGBE_STATS1_INFO(rx_jabber_frames),
1667 	XGBE_STATS1_INFO(rx_undersized_frames),
1668 	XGBE_STATS1_INFO(rx_fragments),
1669 	XGBE_STATS1_INFO(overrun_type4),
1670 	XGBE_STATS1_INFO(overrun_type5),
1671 	XGBE_STATS1_INFO(rx_bytes),
1672 	XGBE_STATS1_INFO(tx_good_frames),
1673 	XGBE_STATS1_INFO(tx_broadcast_frames),
1674 	XGBE_STATS1_INFO(tx_multicast_frames),
1675 	XGBE_STATS1_INFO(tx_pause_frames),
1676 	XGBE_STATS1_INFO(tx_deferred_frames),
1677 	XGBE_STATS1_INFO(tx_collision_frames),
1678 	XGBE_STATS1_INFO(tx_single_coll_frames),
1679 	XGBE_STATS1_INFO(tx_mult_coll_frames),
1680 	XGBE_STATS1_INFO(tx_excessive_collisions),
1681 	XGBE_STATS1_INFO(tx_late_collisions),
1682 	XGBE_STATS1_INFO(tx_underrun),
1683 	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1684 	XGBE_STATS1_INFO(tx_bytes),
1685 	XGBE_STATS1_INFO(tx_64byte_frames),
1686 	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1687 	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1688 	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1689 	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1690 	XGBE_STATS1_INFO(tx_1024byte_frames),
1691 	XGBE_STATS1_INFO(net_bytes),
1692 	XGBE_STATS1_INFO(rx_sof_overruns),
1693 	XGBE_STATS1_INFO(rx_mof_overruns),
1694 	XGBE_STATS1_INFO(rx_dma_overruns),
1695 	/* XGBE module 2 */
1696 	XGBE_STATS2_INFO(rx_good_frames),
1697 	XGBE_STATS2_INFO(rx_broadcast_frames),
1698 	XGBE_STATS2_INFO(rx_multicast_frames),
1699 	XGBE_STATS2_INFO(rx_pause_frames),
1700 	XGBE_STATS2_INFO(rx_crc_errors),
1701 	XGBE_STATS2_INFO(rx_align_code_errors),
1702 	XGBE_STATS2_INFO(rx_oversized_frames),
1703 	XGBE_STATS2_INFO(rx_jabber_frames),
1704 	XGBE_STATS2_INFO(rx_undersized_frames),
1705 	XGBE_STATS2_INFO(rx_fragments),
1706 	XGBE_STATS2_INFO(overrun_type4),
1707 	XGBE_STATS2_INFO(overrun_type5),
1708 	XGBE_STATS2_INFO(rx_bytes),
1709 	XGBE_STATS2_INFO(tx_good_frames),
1710 	XGBE_STATS2_INFO(tx_broadcast_frames),
1711 	XGBE_STATS2_INFO(tx_multicast_frames),
1712 	XGBE_STATS2_INFO(tx_pause_frames),
1713 	XGBE_STATS2_INFO(tx_deferred_frames),
1714 	XGBE_STATS2_INFO(tx_collision_frames),
1715 	XGBE_STATS2_INFO(tx_single_coll_frames),
1716 	XGBE_STATS2_INFO(tx_mult_coll_frames),
1717 	XGBE_STATS2_INFO(tx_excessive_collisions),
1718 	XGBE_STATS2_INFO(tx_late_collisions),
1719 	XGBE_STATS2_INFO(tx_underrun),
1720 	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1721 	XGBE_STATS2_INFO(tx_bytes),
1722 	XGBE_STATS2_INFO(tx_64byte_frames),
1723 	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1724 	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1725 	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1726 	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1727 	XGBE_STATS2_INFO(tx_1024byte_frames),
1728 	XGBE_STATS2_INFO(net_bytes),
1729 	XGBE_STATS2_INFO(rx_sof_overruns),
1730 	XGBE_STATS2_INFO(rx_mof_overruns),
1731 	XGBE_STATS2_INFO(rx_dma_overruns),
1732 };
1733 
1734 #define for_each_intf(i, priv) \
1735 	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1736 
1737 #define for_each_sec_slave(slave, priv) \
1738 	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1739 
#define first_sec_slave(priv)					\
	list_first_entry(&(priv)->secondary_slaves, \
			struct gbe_slave, slave_list)
1743 
1744 static void keystone_get_drvinfo(struct net_device *ndev,
1745 				 struct ethtool_drvinfo *info)
1746 {
	strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
	strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1749 }
1750 
1751 static u32 keystone_get_msglevel(struct net_device *ndev)
1752 {
1753 	struct netcp_intf *netcp = netdev_priv(ndev);
1754 
1755 	return netcp->msg_enable;
1756 }
1757 
1758 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1759 {
1760 	struct netcp_intf *netcp = netdev_priv(ndev);
1761 
1762 	netcp->msg_enable = value;
1763 }
1764 
1765 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1766 {
1767 	struct gbe_intf *gbe_intf;
1768 
1769 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1770 	if (!gbe_intf)
1771 		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1772 
1773 	return gbe_intf;
1774 }
1775 
1776 static void keystone_get_stat_strings(struct net_device *ndev,
1777 				      uint32_t stringset, uint8_t *data)
1778 {
1779 	struct netcp_intf *netcp = netdev_priv(ndev);
1780 	struct gbe_intf *gbe_intf;
1781 	struct gbe_priv *gbe_dev;
1782 	int i;
1783 
1784 	gbe_intf = keystone_get_intf_data(netcp);
1785 	if (!gbe_intf)
1786 		return;
1787 	gbe_dev = gbe_intf->gbe_dev;
1788 
1789 	switch (stringset) {
1790 	case ETH_SS_STATS:
1791 		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1792 			memcpy(data, gbe_dev->et_stats[i].desc,
1793 			       ETH_GSTRING_LEN);
1794 			data += ETH_GSTRING_LEN;
1795 		}
1796 		break;
1797 	case ETH_SS_TEST:
1798 		break;
1799 	}
1800 }
1801 
1802 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1803 {
1804 	struct netcp_intf *netcp = netdev_priv(ndev);
1805 	struct gbe_intf *gbe_intf;
1806 	struct gbe_priv *gbe_dev;
1807 
1808 	gbe_intf = keystone_get_intf_data(netcp);
1809 	if (!gbe_intf)
1810 		return -EINVAL;
1811 	gbe_dev = gbe_intf->gbe_dev;
1812 
1813 	switch (stringset) {
1814 	case ETH_SS_TEST:
1815 		return 0;
1816 	case ETH_SS_STATS:
1817 		return gbe_dev->num_et_stats;
1818 	default:
1819 		return -EINVAL;
1820 	}
1821 }
1822 
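/* Zero the accumulated software counters for one hw stats module and
 * re-baseline hw_stats_prev from the current hardware counter values.
 */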
1823 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1824 {
1825 	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1826 	u32  __iomem *p_stats_entry;
1827 	int i;
1828 
1829 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1830 		if (gbe_dev->et_stats[i].type == stats_mod) {
1831 			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1832 			gbe_dev->hw_stats[i] = 0;
1833 			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1834 		}
1835 	}
1836 }
1837 
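/* Accumulate the delta since the last read of a 32-bit hardware counter
 * into the corresponding 64-bit software counter.  Unsigned subtraction
 * handles counter wrap-around.
 */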
1838 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1839 					     int et_stats_entry)
1840 {
	void __iomem *base;
	u32  __iomem *p_stats_entry;
	u32 curr, delta;

	/* The hw_stats_regs pointers are already
	 * properly set to point to the right base.
	 */
1848 	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1849 	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1850 	curr = readl(p_stats_entry);
1851 	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1852 	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1853 	gbe_dev->hw_stats[et_stats_entry] += delta;
1854 }
1855 
1856 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1857 {
1858 	int i;
1859 
1860 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1861 		gbe_update_hw_stats_entry(gbe_dev, i);
1862 
1863 		if (data)
1864 			data[i] = gbe_dev->hw_stats[i];
1865 	}
1866 }
1867 
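/* On version 1.4 hardware only two of the four stats modules are mapped
 * at a time; the GBE_STATS_CD_SEL bit in stat_port_en selects whether
 * the A/B or the C/D pair is currently visible.
 */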
1868 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1869 					       int stats_mod)
1870 {
1871 	u32 val;
1872 
1873 	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1874 
1875 	switch (stats_mod) {
1876 	case GBE_STATSA_MODULE:
1877 	case GBE_STATSB_MODULE:
1878 		val &= ~GBE_STATS_CD_SEL;
1879 		break;
1880 	case GBE_STATSC_MODULE:
1881 	case GBE_STATSD_MODULE:
1882 		val |= GBE_STATS_CD_SEL;
1883 		break;
1884 	default:
1885 		return;
1886 	}
1887 
1888 	/* make the stat module visible */
1889 	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1890 }
1891 
1892 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1893 {
1894 	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1895 	gbe_reset_mod_stats(gbe_dev, stats_mod);
1896 }
1897 
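/* Version 1.4 variant of gbe_update_stats().  It assumes the first half
 * of the et_stats entries belongs to stats modules A/B and the second
 * half to C/D, switching the visible pair before reading each half.
 */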
1898 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1899 {
1900 	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1901 	int et_entry, j, pair;
1902 
1903 	for (pair = 0; pair < 2; pair++) {
1904 		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1905 						      GBE_STATSC_MODULE :
1906 						      GBE_STATSA_MODULE));
1907 
1908 		for (j = 0; j < half_num_et_stats; j++) {
1909 			et_entry = pair * half_num_et_stats + j;
1910 			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1911 
1912 			if (data)
1913 				data[et_entry] = gbe_dev->hw_stats[et_entry];
1914 		}
1915 	}
1916 }
1917 
1918 static void keystone_get_ethtool_stats(struct net_device *ndev,
1919 				       struct ethtool_stats *stats,
1920 				       uint64_t *data)
1921 {
1922 	struct netcp_intf *netcp = netdev_priv(ndev);
1923 	struct gbe_intf *gbe_intf;
1924 	struct gbe_priv *gbe_dev;
1925 
1926 	gbe_intf = keystone_get_intf_data(netcp);
1927 	if (!gbe_intf)
1928 		return;
1929 
1930 	gbe_dev = gbe_intf->gbe_dev;
1931 	spin_lock_bh(&gbe_dev->hw_stats_lock);
1932 	if (IS_SS_ID_VER_14(gbe_dev))
1933 		gbe_update_stats_ver14(gbe_dev, data);
1934 	else
1935 		gbe_update_stats(gbe_dev, data);
1936 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1937 }
1938 
1939 static int keystone_get_link_ksettings(struct net_device *ndev,
1940 				       struct ethtool_link_ksettings *cmd)
1941 {
1942 	struct netcp_intf *netcp = netdev_priv(ndev);
1943 	struct phy_device *phy = ndev->phydev;
1944 	struct gbe_intf *gbe_intf;
1945 
1946 	if (!phy)
1947 		return -EINVAL;
1948 
1949 	gbe_intf = keystone_get_intf_data(netcp);
1950 	if (!gbe_intf)
1951 		return -EINVAL;
1952 
1953 	if (!gbe_intf->slave)
1954 		return -EINVAL;
1955 
1956 	phy_ethtool_ksettings_get(phy, cmd);
1957 	cmd->base.port = gbe_intf->slave->phy_port_t;
1958 
1959 	return 0;
1960 }
1961 
1962 static int keystone_set_link_ksettings(struct net_device *ndev,
1963 				       const struct ethtool_link_ksettings *cmd)
1964 {
1965 	struct netcp_intf *netcp = netdev_priv(ndev);
1966 	struct phy_device *phy = ndev->phydev;
1967 	struct gbe_intf *gbe_intf;
1968 	u8 port = cmd->base.port;
1969 	u32 advertising, supported;
1970 	u32 features;
1971 
1972 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1973 						cmd->link_modes.advertising);
1974 	ethtool_convert_link_mode_to_legacy_u32(&supported,
1975 						cmd->link_modes.supported);
1976 	features = advertising & supported;
1977 
1978 	if (!phy)
1979 		return -EINVAL;
1980 
1981 	gbe_intf = keystone_get_intf_data(netcp);
1982 	if (!gbe_intf)
1983 		return -EINVAL;
1984 
1985 	if (!gbe_intf->slave)
1986 		return -EINVAL;
1987 
1988 	if (port != gbe_intf->slave->phy_port_t) {
1989 		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1990 			return -EINVAL;
1991 
1992 		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1993 			return -EINVAL;
1994 
1995 		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1996 			return -EINVAL;
1997 
1998 		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1999 			return -EINVAL;
2000 
2001 		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
2002 			return -EINVAL;
2003 	}
2004 
2005 	gbe_intf->slave->phy_port_t = port;
2006 	return phy_ethtool_ksettings_set(phy, cmd);
2007 }
2008 
2009 #if IS_ENABLED(CONFIG_TI_CPTS)
2010 static int keystone_get_ts_info(struct net_device *ndev,
2011 				struct ethtool_ts_info *info)
2012 {
2013 	struct netcp_intf *netcp = netdev_priv(ndev);
2014 	struct gbe_intf *gbe_intf;
2015 
2016 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2017 	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2018 		return -EINVAL;
2019 
2020 	info->so_timestamping =
2021 		SOF_TIMESTAMPING_TX_HARDWARE |
2022 		SOF_TIMESTAMPING_TX_SOFTWARE |
2023 		SOF_TIMESTAMPING_RX_HARDWARE |
2024 		SOF_TIMESTAMPING_RX_SOFTWARE |
2025 		SOF_TIMESTAMPING_SOFTWARE |
2026 		SOF_TIMESTAMPING_RAW_HARDWARE;
2027 	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2028 	info->tx_types =
2029 		(1 << HWTSTAMP_TX_OFF) |
2030 		(1 << HWTSTAMP_TX_ON);
2031 	info->rx_filters =
2032 		(1 << HWTSTAMP_FILTER_NONE) |
2033 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2034 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2035 	return 0;
2036 }
2037 #else
2038 static int keystone_get_ts_info(struct net_device *ndev,
2039 				struct ethtool_ts_info *info)
2040 {
2041 	info->so_timestamping =
2042 		SOF_TIMESTAMPING_TX_SOFTWARE |
2043 		SOF_TIMESTAMPING_RX_SOFTWARE |
2044 		SOF_TIMESTAMPING_SOFTWARE;
2045 	info->phc_index = -1;
2046 	info->tx_types = 0;
2047 	info->rx_filters = 0;
2048 	return 0;
2049 }
2050 #endif /* CONFIG_TI_CPTS */
2051 
2052 static const struct ethtool_ops keystone_ethtool_ops = {
2053 	.get_drvinfo		= keystone_get_drvinfo,
2054 	.get_link		= ethtool_op_get_link,
2055 	.get_msglevel		= keystone_get_msglevel,
2056 	.set_msglevel		= keystone_set_msglevel,
2057 	.get_strings		= keystone_get_stat_strings,
2058 	.get_sset_count		= keystone_get_sset_count,
2059 	.get_ethtool_stats	= keystone_get_ethtool_stats,
2060 	.get_link_ksettings	= keystone_get_link_ksettings,
2061 	.set_link_ksettings	= keystone_set_link_ksettings,
2062 	.get_ts_info		= keystone_get_ts_info,
2063 };
2064 
2065 static void gbe_set_slave_mac(struct gbe_slave *slave,
2066 			      struct gbe_intf *gbe_intf)
2067 {
2068 	struct net_device *ndev = gbe_intf->ndev;
2069 
2070 	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2071 	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2072 }
2073 
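/* Map a slave index to its switch port number: when the host occupies
 * port 0, slave N is switch port N + 1.
 */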
2074 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2075 {
2076 	if (priv->host_port == 0)
2077 		return slave_num + 1;
2078 
2079 	return slave_num;
2080 }
2081 
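/* Apply a link up/down transition to a slave port: program mac_control
 * for the negotiated speed, set the ALE port state to forward/disable
 * and toggle the netdev carrier for link types that are not managed by
 * an attached PHY.
 */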
2082 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2083 					  struct net_device *ndev,
2084 					  struct gbe_slave *slave,
2085 					  int up)
2086 {
2087 	struct phy_device *phy = slave->phy;
2088 	u32 mac_control = 0;
2089 
2090 	if (up) {
2091 		mac_control = slave->mac_control;
2092 		if (phy && (phy->speed == SPEED_1000)) {
2093 			mac_control |= MACSL_GIG_MODE;
2094 			mac_control &= ~MACSL_XGIG_MODE;
2095 		} else if (phy && (phy->speed == SPEED_10000)) {
2096 			mac_control |= MACSL_XGIG_MODE;
2097 			mac_control &= ~MACSL_GIG_MODE;
2098 		}
2099 
2100 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2101 						 mac_control));
2102 
2103 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2104 				     ALE_PORT_STATE,
2105 				     ALE_PORT_STATE_FORWARD);
2106 
2107 		if (ndev && slave->open &&
2108 		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2109 		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2110 		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
2111 			netif_carrier_on(ndev);
2112 	} else {
2113 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2114 						 mac_control));
2115 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2116 				     ALE_PORT_STATE,
2117 				     ALE_PORT_STATE_DISABLE);
2118 		if (ndev &&
2119 		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2120 		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2121 		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
2122 			netif_carrier_off(ndev);
2123 	}
2124 
2125 	if (phy)
2126 		phy_print_status(phy);
2127 }
2128 
2129 static bool gbe_phy_link_status(struct gbe_slave *slave)
2130 {
	return !slave->phy || slave->phy->link;
2132 }
2133 
2134 #define RGMII_REG_STATUS_LINK	BIT(0)
2135 
2136 static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2137 {
2138 	u32 val = 0;
2139 
2140 	val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2141 	*status = !!(val & RGMII_REG_STATUS_LINK);
2142 }
2143 
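/* Recompute a slave's link state from the switch-level (SGMII/RGMII)
 * status and the attached PHY (if any); act only when the combined
 * state actually changes.
 */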
2144 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2145 					  struct gbe_slave *slave,
2146 					  struct net_device *ndev)
2147 {
2148 	bool sw_link_state = true, phy_link_state;
2149 	int sp = slave->slave_num, link_state;
2150 
2151 	if (!slave->open)
2152 		return;
2153 
2154 	if (SLAVE_LINK_IS_RGMII(slave))
2155 		netcp_2u_rgmii_get_port_link(gbe_dev,
2156 					     &sw_link_state);
2157 	if (SLAVE_LINK_IS_SGMII(slave))
2158 		sw_link_state =
2159 		netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2160 
2161 	phy_link_state = gbe_phy_link_status(slave);
2162 	link_state = phy_link_state & sw_link_state;
2163 
2164 	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2165 		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2166 					      link_state);
2167 }
2168 
2169 static void xgbe_adjust_link(struct net_device *ndev)
2170 {
2171 	struct netcp_intf *netcp = netdev_priv(ndev);
2172 	struct gbe_intf *gbe_intf;
2173 
2174 	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2175 	if (!gbe_intf)
2176 		return;
2177 
2178 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2179 				      ndev);
2180 }
2181 
2182 static void gbe_adjust_link(struct net_device *ndev)
2183 {
2184 	struct netcp_intf *netcp = netdev_priv(ndev);
2185 	struct gbe_intf *gbe_intf;
2186 
2187 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2188 	if (!gbe_intf)
2189 		return;
2190 
2191 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2192 				      ndev);
2193 }
2194 
2195 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2196 {
2197 	struct gbe_priv *gbe_dev = netdev_priv(ndev);
2198 	struct gbe_slave *slave;
2199 
2200 	for_each_sec_slave(slave, gbe_dev)
2201 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2202 }
2203 
2204 /* Reset EMAC
2205  * Soft reset is set and polled until clear, or until a timeout occurs
2206  */
2207 static int gbe_port_reset(struct gbe_slave *slave)
2208 {
2209 	u32 i, v;
2210 
2211 	/* Set the soft reset bit */
2212 	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2213 
2214 	/* Wait for the bit to clear */
2215 	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2216 		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2217 		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2218 			return 0;
2219 	}
2220 
2221 	/* Timeout on the reset */
2222 	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2223 }
2224 
2225 /* Configure EMAC */
2226 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2227 			    int max_rx_len)
2228 {
2229 	void __iomem *rx_maxlen_reg;
2230 	u32 xgmii_mode;
2231 
2232 	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2233 		max_rx_len = NETCP_MAX_FRAME_SIZE;
2234 
2235 	/* Enable correct MII mode at SS level */
2236 	if (IS_SS_ID_XGBE(gbe_dev) &&
2237 	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2238 		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2239 		xgmii_mode |= (1 << slave->slave_num);
2240 		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2241 	}
2242 
2243 	if (IS_SS_ID_MU(gbe_dev))
2244 		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2245 	else
2246 		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2247 
2248 	writel(max_rx_len, rx_maxlen_reg);
2249 	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2250 }
2251 
2252 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2253 			      struct gbe_slave *slave, bool set)
2254 {
2255 	if (SLAVE_LINK_IS_XGMII(slave))
2256 		return;
2257 
2258 	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2259 			    slave->slave_num, set);
2260 }
2261 
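/* Quiesce a slave port: reset the MAC, disable ALE forwarding, remove
 * the broadcast multicast entry and detach the PHY if one was connected.
 */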
2262 static void gbe_slave_stop(struct gbe_intf *intf)
2263 {
2264 	struct gbe_priv *gbe_dev = intf->gbe_dev;
2265 	struct gbe_slave *slave = intf->slave;
2266 
2267 	if (!IS_SS_ID_2U(gbe_dev))
2268 		gbe_sgmii_rtreset(gbe_dev, slave, true);
2269 	gbe_port_reset(slave);
2270 	/* Disable forwarding */
2271 	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2272 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2273 	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2274 			   1 << slave->port_num, 0, 0);
2275 
2276 	if (!slave->phy)
2277 		return;
2278 
2279 	phy_stop(slave->phy);
2280 	phy_disconnect(slave->phy);
2281 	slave->phy = NULL;
2282 }
2283 
2284 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2285 {
2286 	if (SLAVE_LINK_IS_XGMII(slave))
2287 		return;
2288 
2289 	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2290 	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2291 			   slave->link_interface);
2292 }
2293 
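/* Bring up a slave port: configure SGMII where applicable, reset and
 * configure the MAC, program the MAC address, enable ALE forwarding and,
 * for MAC-PHY link types, connect and start the PHY.
 */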
2294 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2295 {
2296 	struct gbe_priv *priv = gbe_intf->gbe_dev;
2297 	struct gbe_slave *slave = gbe_intf->slave;
2298 	phy_interface_t phy_mode;
2299 	bool has_phy = false;
2300 
2301 	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2302 
2303 	if (!IS_SS_ID_2U(priv))
2304 		gbe_sgmii_config(priv, slave);
2305 	gbe_port_reset(slave);
2306 	if (!IS_SS_ID_2U(priv))
2307 		gbe_sgmii_rtreset(priv, slave, false);
2308 	gbe_port_config(priv, slave, priv->rx_packet_max);
2309 	gbe_set_slave_mac(slave, gbe_intf);
2310 	/* For NU & 2U switch, map the vlan priorities to zero
2311 	 * as we only configure to use priority 0
2312 	 */
2313 	if (IS_SS_ID_MU(priv))
2314 		writel(HOST_TX_PRI_MAP_DEFAULT,
2315 		       GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2316 
2317 	/* enable forwarding */
2318 	cpsw_ale_control_set(priv->ale, slave->port_num,
2319 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2320 	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2321 			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2322 
2323 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2324 		has_phy = true;
2325 		phy_mode = PHY_INTERFACE_MODE_SGMII;
2326 		slave->phy_port_t = PORT_MII;
2327 	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2328 		has_phy = true;
2329 		phy_mode = of_get_phy_mode(slave->node);
2330 		/* if phy-mode is not present, default to
2331 		 * PHY_INTERFACE_MODE_RGMII
2332 		 */
2333 		if (phy_mode < 0)
2334 			phy_mode = PHY_INTERFACE_MODE_RGMII;
2335 
2336 		if (!phy_interface_mode_is_rgmii(phy_mode)) {
2337 			dev_err(priv->dev,
2338 				"Unsupported phy mode %d\n", phy_mode);
2339 			return -EINVAL;
2340 		}
2341 		slave->phy_port_t = PORT_MII;
2342 	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2343 		has_phy = true;
2344 		phy_mode = PHY_INTERFACE_MODE_NA;
2345 		slave->phy_port_t = PORT_FIBRE;
2346 	}
2347 
2348 	if (has_phy) {
2349 		if (IS_SS_ID_XGBE(priv))
2350 			hndlr = xgbe_adjust_link;
2351 
2352 		slave->phy = of_phy_connect(gbe_intf->ndev,
2353 					    slave->phy_node,
2354 					    hndlr, 0,
2355 					    phy_mode);
2356 		if (!slave->phy) {
2357 			dev_err(priv->dev, "phy not found on slave %d\n",
2358 				slave->slave_num);
2359 			return -ENODEV;
2360 		}
2361 		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
2362 			phydev_name(slave->phy));
2363 		phy_start(slave->phy);
2364 	}
2365 	return 0;
2366 }
2367 
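/* One-time host port and ALE initialization: program the host priority
 * map and rx_maxlen, start the ALE and set its global controls (bypass
 * when the ALE is not used, port 0 forwarding, unknown VLAN handling).
 */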
2368 static void gbe_init_host_port(struct gbe_priv *priv)
2369 {
2370 	int bypass_en = 1;
2371 
2372 	/* Host Tx Pri */
2373 	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2374 		writel(HOST_TX_PRI_MAP_DEFAULT,
2375 		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2376 
2377 	/* Max length register */
2378 	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2379 						  rx_maxlen));
2380 
2381 	cpsw_ale_start(priv->ale);
2382 
2383 	if (priv->enable_ale)
2384 		bypass_en = 0;
2385 
2386 	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2387 
2388 	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2389 
2390 	cpsw_ale_control_set(priv->ale, priv->host_port,
2391 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2392 
2393 	cpsw_ale_control_set(priv->ale, 0,
2394 			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2395 			     GBE_PORT_MASK(priv->ale_ports));
2396 
2397 	cpsw_ale_control_set(priv->ale, 0,
2398 			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2399 			     GBE_PORT_MASK(priv->ale_ports - 1));
2400 
2401 	cpsw_ale_control_set(priv->ale, 0,
2402 			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2403 			     GBE_PORT_MASK(priv->ale_ports));
2404 
2405 	cpsw_ale_control_set(priv->ale, 0,
2406 			     ALE_PORT_UNTAGGED_EGRESS,
2407 			     GBE_PORT_MASK(priv->ale_ports));
2408 }
2409 
2410 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2411 {
2412 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2413 	u16 vlan_id;
2414 
2415 	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2416 			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2417 			   ALE_MCAST_FWD_2);
2418 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2419 		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2420 				   GBE_PORT_MASK(gbe_dev->ale_ports),
2421 				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2422 	}
2423 }
2424 
2425 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2426 {
2427 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2428 	u16 vlan_id;
2429 
2430 	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2431 
2432 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2433 		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2434 				   ALE_VLAN, vlan_id);
2435 }
2436 
2437 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2438 {
2439 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2440 	u16 vlan_id;
2441 
2442 	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2443 
2444 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2445 		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2446 	}
2447 }
2448 
2449 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2450 {
2451 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2452 	u16 vlan_id;
2453 
2454 	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2455 
2456 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2457 		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2458 				   ALE_VLAN, vlan_id);
2459 	}
2460 }
2461 
2462 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2463 {
2464 	struct gbe_intf *gbe_intf = intf_priv;
2465 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2466 
2467 	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2468 		naddr->addr, naddr->type);
2469 
2470 	switch (naddr->type) {
2471 	case ADDR_MCAST:
2472 	case ADDR_BCAST:
2473 		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2474 		break;
2475 	case ADDR_UCAST:
2476 	case ADDR_DEV:
2477 		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2478 		break;
2479 	case ADDR_ANY:
2480 		/* nothing to do for promiscuous */
2481 	default:
2482 		break;
2483 	}
2484 
2485 	return 0;
2486 }
2487 
2488 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2489 {
2490 	struct gbe_intf *gbe_intf = intf_priv;
2491 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2492 
2493 	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2494 		naddr->addr, naddr->type);
2495 
2496 	switch (naddr->type) {
2497 	case ADDR_MCAST:
2498 	case ADDR_BCAST:
2499 		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2500 		break;
2501 	case ADDR_UCAST:
2502 	case ADDR_DEV:
2503 		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2504 		break;
2505 	case ADDR_ANY:
2506 		/* nothing to do for promiscuous */
2507 	default:
2508 		break;
2509 	}
2510 
2511 	return 0;
2512 }
2513 
2514 static int gbe_add_vid(void *intf_priv, int vid)
2515 {
2516 	struct gbe_intf *gbe_intf = intf_priv;
2517 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2518 
2519 	set_bit(vid, gbe_intf->active_vlans);
2520 
2521 	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2522 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2523 			  GBE_MASK_NO_PORTS,
2524 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2525 			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2526 
2527 	return 0;
2528 }
2529 
2530 static int gbe_del_vid(void *intf_priv, int vid)
2531 {
2532 	struct gbe_intf *gbe_intf = intf_priv;
2533 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2534 
2535 	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2536 	clear_bit(vid, gbe_intf->active_vlans);
2537 	return 0;
2538 }
2539 
2540 #if IS_ENABLED(CONFIG_TI_CPTS)
2541 #define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
2542 #define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
2543 
2544 static void gbe_txtstamp(void *context, struct sk_buff *skb)
2545 {
2546 	struct gbe_intf *gbe_intf = context;
2547 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2548 
2549 	cpts_tx_timestamp(gbe_dev->cpts, skb);
2550 }
2551 
2552 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2553 			      const struct netcp_packet *p_info)
2554 {
2555 	struct sk_buff *skb = p_info->skb;
2556 
2557 	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2558 }
2559 
2560 static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2561 				 struct netcp_packet *p_info)
2562 {
2563 	struct phy_device *phydev = p_info->skb->dev->phydev;
2564 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2565 
2566 	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2567 	    !cpts_is_tx_enabled(gbe_dev->cpts))
2568 		return 0;
2569 
	/* If the phy provides a txtstamp callback, assume it will handle
	 * the timestamping.  We mark the skb here because skb_tx_timestamp()
	 * is called after all the txhooks have run.
	 */
2574 	if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
2575 		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2576 		return 0;
2577 	}
2578 
2579 	if (gbe_need_txtstamp(gbe_intf, p_info)) {
2580 		p_info->txtstamp = gbe_txtstamp;
2581 		p_info->ts_context = (void *)gbe_intf;
2582 		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2583 	}
2584 
2585 	return 0;
2586 }
2587 
2588 static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2589 {
2590 	struct phy_device *phydev = p_info->skb->dev->phydev;
2591 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2592 
2593 	if (p_info->rxtstamp_complete)
2594 		return 0;
2595 
2596 	if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
2597 		p_info->rxtstamp_complete = true;
2598 		return 0;
2599 	}
2600 
2601 	cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2602 	p_info->rxtstamp_complete = true;
2603 
2604 	return 0;
2605 }
2606 
2607 static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2608 {
2609 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2610 	struct cpts *cpts = gbe_dev->cpts;
2611 	struct hwtstamp_config cfg;
2612 
2613 	if (!cpts)
2614 		return -EOPNOTSUPP;
2615 
2616 	cfg.flags = 0;
2617 	cfg.tx_type = cpts_is_tx_enabled(cpts) ?
2618 		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2619 	cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
2620 			 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
2621 
2622 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2623 }
2624 
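/* Program the per-port timestamping control registers (ts_ctl,
 * ts_seq_ltype, ts_ctl_ltype2) according to the current CPTS rx/tx
 * enable state; clear ts_ctl when timestamping is fully disabled.
 */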
2625 static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2626 {
2627 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2628 	struct gbe_slave *slave = gbe_intf->slave;
2629 	u32 ts_en, seq_id, ctl;
2630 
2631 	if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
2632 	    !cpts_is_tx_enabled(gbe_dev->cpts)) {
2633 		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2634 		return;
2635 	}
2636 
2637 	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2638 	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2639 	ctl = ETH_P_1588 | TS_TTL_NONZERO |
2640 		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2641 		(slave->ts_ctl.uni ?  TS_UNI_EN :
2642 			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2643 
2644 	if (cpts_is_tx_enabled(gbe_dev->cpts))
2645 		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2646 
2647 	if (cpts_is_rx_enabled(gbe_dev->cpts))
2648 		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2649 
2650 	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2651 	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2652 	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2653 }
2654 
2655 static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2656 {
2657 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2658 	struct cpts *cpts = gbe_dev->cpts;
2659 	struct hwtstamp_config cfg;
2660 
2661 	if (!cpts)
2662 		return -EOPNOTSUPP;
2663 
2664 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2665 		return -EFAULT;
2666 
2667 	/* reserved for future extensions */
2668 	if (cfg.flags)
2669 		return -EINVAL;
2670 
2671 	switch (cfg.tx_type) {
2672 	case HWTSTAMP_TX_OFF:
2673 		cpts_tx_enable(cpts, 0);
2674 		break;
2675 	case HWTSTAMP_TX_ON:
2676 		cpts_tx_enable(cpts, 1);
2677 		break;
2678 	default:
2679 		return -ERANGE;
2680 	}
2681 
2682 	switch (cfg.rx_filter) {
2683 	case HWTSTAMP_FILTER_NONE:
2684 		cpts_rx_enable(cpts, 0);
2685 		break;
2686 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2687 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2688 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2689 		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
2690 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2691 		break;
2692 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2693 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2694 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2695 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2696 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2697 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2698 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2699 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2700 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2701 		cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
2702 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2703 		break;
2704 	default:
2705 		return -ERANGE;
2706 	}
2707 
2708 	gbe_hwtstamp(gbe_intf);
2709 
2710 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2711 }
2712 
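/* CPTS registration is reference counted: the device is registered when
 * the first interface is opened and unregistered when the last one is
 * closed.
 */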
2713 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2714 {
2715 	if (!gbe_dev->cpts)
2716 		return;
2717 
2718 	if (gbe_dev->cpts_registered > 0)
2719 		goto done;
2720 
2721 	if (cpts_register(gbe_dev->cpts)) {
2722 		dev_err(gbe_dev->dev, "error registering cpts device\n");
2723 		return;
2724 	}
2725 
2726 done:
2727 	++gbe_dev->cpts_registered;
2728 }
2729 
2730 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2731 {
2732 	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2733 		return;
2734 
2735 	if (--gbe_dev->cpts_registered)
2736 		return;
2737 
2738 	cpts_unregister(gbe_dev->cpts);
2739 }
2740 #else
2741 static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2742 					struct netcp_packet *p_info)
2743 {
2744 	return 0;
2745 }
2746 
2747 static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2748 			       struct netcp_packet *p_info)
2749 {
2750 	return 0;
2751 }
2752 
2753 static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2754 			       struct ifreq *ifr, int cmd)
2755 {
2756 	return -EOPNOTSUPP;
2757 }
2758 
2759 static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2760 {
2761 }
2762 
2763 static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2764 {
2765 }
2766 
2767 static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2768 {
2769 	return -EOPNOTSUPP;
2770 }
2771 
2772 static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2773 {
2774 	return -EOPNOTSUPP;
2775 }
2776 #endif /* CONFIG_TI_CPTS */
2777 
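/* Switch the ALE between normal and promiscuous operation: promiscuous
 * mode disables learning on all ports, ages out and flushes existing
 * multicast entries and floods unknown unicast to the host port.
 */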
2778 static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2779 {
2780 	struct gbe_intf *gbe_intf = intf_priv;
2781 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2782 	struct cpsw_ale *ale = gbe_dev->ale;
2783 	unsigned long timeout;
2784 	int i, ret = -ETIMEDOUT;
2785 
	/* Disable (1) / enable (0) learning for all ports (host is port 0
	 * and slaves are port 1 and up)
	 */
2789 	for (i = 0; i <= gbe_dev->num_slaves; i++) {
2790 		cpsw_ale_control_set(ale, i,
2791 				     ALE_PORT_NOLEARN, !!promisc);
2792 		cpsw_ale_control_set(ale, i,
2793 				     ALE_PORT_NO_SA_UPDATE, !!promisc);
2794 	}
2795 
2796 	if (!promisc) {
2797 		/* Don't Flood All Unicast Packets to Host port */
2798 		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2799 		dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2800 		return 0;
2801 	}
2802 
2803 	timeout = jiffies + HZ;
2804 
2805 	/* Clear All Untouched entries */
2806 	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2807 	do {
2808 		cpu_relax();
2809 		if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2810 			ret = 0;
2811 			break;
2812 		}
2813 
2814 	} while (time_after(timeout, jiffies));
2815 
2816 	/* Make sure it is not a false timeout */
2817 	if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2818 		return ret;
2819 
2820 	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2821 
2822 	/* Clear all mcast from ALE */
2823 	cpsw_ale_flush_multicast(ale,
2824 				 GBE_PORT_MASK(gbe_dev->ale_ports),
2825 				 -1);
2826 
2827 	/* Flood All Unicast Packets to Host port */
2828 	cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2829 	dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2830 	return ret;
2831 }
2832 
2833 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2834 {
2835 	struct gbe_intf *gbe_intf = intf_priv;
2836 	struct phy_device *phy = gbe_intf->slave->phy;
2837 
	if (!phy || !phy->drv || !phy->drv->hwtstamp) {
2839 		switch (cmd) {
2840 		case SIOCGHWTSTAMP:
2841 			return gbe_hwtstamp_get(gbe_intf, req);
2842 		case SIOCSHWTSTAMP:
2843 			return gbe_hwtstamp_set(gbe_intf, req);
2844 		}
2845 	}
2846 
2847 	if (phy)
2848 		return phy_mii_ioctl(phy, req, cmd);
2849 
2850 	return -EOPNOTSUPP;
2851 }
2852 
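/* Periodic housekeeping: refresh the link state of all interfaces and
 * secondary slaves, pull the hardware statistics into the software
 * counters and re-arm the timer.
 */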
2853 static void netcp_ethss_timer(struct timer_list *t)
2854 {
2855 	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2856 	struct gbe_intf *gbe_intf;
2857 	struct gbe_slave *slave;
2858 
2859 	/* Check & update SGMII link state of interfaces */
2860 	for_each_intf(gbe_intf, gbe_dev) {
2861 		if (!gbe_intf->slave->open)
2862 			continue;
2863 		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2864 					      gbe_intf->ndev);
2865 	}
2866 
2867 	/* Check & update SGMII link state of secondary ports */
2868 	for_each_sec_slave(slave, gbe_dev) {
2869 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2870 	}
2871 
2872 	/* A timer runs as a BH, no need to block them */
2873 	spin_lock(&gbe_dev->hw_stats_lock);
2874 
2875 	if (IS_SS_ID_VER_14(gbe_dev))
2876 		gbe_update_stats_ver14(gbe_dev, NULL);
2877 	else
2878 		gbe_update_stats(gbe_dev, NULL);
2879 
2880 	spin_unlock(&gbe_dev->hw_stats_lock);
2881 
2882 	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2883 	add_timer(&gbe_dev->timer);
2884 }
2885 
2886 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2887 {
2888 	struct gbe_intf *gbe_intf = data;
2889 
2890 	p_info->tx_pipe = &gbe_intf->tx_pipe;
2891 
2892 	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2893 }
2894 
2895 static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2896 {
2897 	struct gbe_intf *gbe_intf = data;
2898 
2899 	return gbe_rxtstamp(gbe_intf, p_info);
2900 }
2901 
2902 static int gbe_open(void *intf_priv, struct net_device *ndev)
2903 {
2904 	struct gbe_intf *gbe_intf = intf_priv;
2905 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2906 	struct netcp_intf *netcp = netdev_priv(ndev);
2907 	struct gbe_slave *slave = gbe_intf->slave;
2908 	int port_num = slave->port_num;
2909 	u32 reg, val;
2910 	int ret;
2911 
2912 	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2913 	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2914 		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2915 		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2916 
	/* For 10G and on NetCP 1.5, use directed-to-port forwarding */
2918 	if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2919 		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2920 
2921 	if (gbe_dev->enable_ale)
2922 		gbe_intf->tx_pipe.switch_to_port = 0;
2923 	else
2924 		gbe_intf->tx_pipe.switch_to_port = port_num;
2925 
2926 	dev_dbg(gbe_dev->dev,
2927 		"opened TX channel %s: %p with to port %d, flags %d\n",
2928 		gbe_intf->tx_pipe.dma_chan_name,
2929 		gbe_intf->tx_pipe.dma_channel,
2930 		gbe_intf->tx_pipe.switch_to_port,
2931 		gbe_intf->tx_pipe.flags);
2932 
2933 	gbe_slave_stop(gbe_intf);
2934 
2935 	/* disable priority elevation and enable statistics on all ports */
2936 	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2937 
2938 	/* Control register */
2939 	val = GBE_CTL_P0_ENABLE;
2940 	if (IS_SS_ID_MU(gbe_dev)) {
2941 		val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2942 		netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2943 	}
2944 	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2945 
2946 	/* All statistics enabled and STAT AB visible by default */
2947 	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2948 						    stat_port_en));
2949 
2950 	ret = gbe_slave_open(gbe_intf);
2951 	if (ret)
2952 		goto fail;
2953 
2954 	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2955 	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2956 
2957 	slave->open = true;
2958 	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2959 
2960 	gbe_register_cpts(gbe_dev);
2961 
2962 	return 0;
2963 
2964 fail:
2965 	gbe_slave_stop(gbe_intf);
2966 	return ret;
2967 }
2968 
2969 static int gbe_close(void *intf_priv, struct net_device *ndev)
2970 {
2971 	struct gbe_intf *gbe_intf = intf_priv;
2972 	struct netcp_intf *netcp = netdev_priv(ndev);
2973 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2974 
2975 	gbe_unregister_cpts(gbe_dev);
2976 
2977 	gbe_slave_stop(gbe_intf);
2978 
2979 	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2980 	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2981 
2982 	gbe_intf->slave->open = false;
2983 	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2984 	return 0;
2985 }
2986 
2987 #if IS_ENABLED(CONFIG_TI_CPTS)
2988 static void init_slave_ts_ctl(struct gbe_slave *slave)
2989 {
2990 	slave->ts_ctl.uni = 1;
2991 	slave->ts_ctl.dst_port_map =
2992 		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2993 	slave->ts_ctl.maddr_map =
2994 		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2995 }
2996 
2997 #else
2998 static void init_slave_ts_ctl(struct gbe_slave *slave)
2999 {
3000 }
3001 #endif /* CONFIG_TI_CPTS */
3002 
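/* Parse a slave's device tree node (slave-port, link-interface,
 * phy-handle), derive its port and EMAC register blocks for the detected
 * subsystem version and initialize the per-version register offsets.
 */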
3003 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
3004 		      struct device_node *node)
3005 {
3006 	int port_reg_num;
3007 	u32 port_reg_ofs, emac_reg_ofs;
3008 	u32 port_reg_blk_sz, emac_reg_blk_sz;
3009 
3010 	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
3011 		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
3012 		return -EINVAL;
3013 	}
3014 
3015 	if (of_property_read_u32(node, "link-interface",
3016 				 &slave->link_interface)) {
3017 		dev_warn(gbe_dev->dev,
3018 			 "missing link-interface value defaulting to 1G mac-phy link\n");
3019 		slave->link_interface = SGMII_LINK_MAC_PHY;
3020 	}
3021 
3022 	slave->node = node;
3023 	slave->open = false;
3024 	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3025 	    (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3026 	    (slave->link_interface == XGMII_LINK_MAC_PHY))
3027 		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3028 	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3029 
3030 	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3031 		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3032 	else
3033 		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3034 
	/* EMAC register maps are contiguous but port register maps are not */
3036 	port_reg_num = slave->slave_num;
3037 	if (IS_SS_ID_VER_14(gbe_dev)) {
3038 		if (slave->slave_num > 1) {
3039 			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3040 			port_reg_num -= 2;
3041 		} else {
3042 			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3043 		}
3044 		emac_reg_ofs = GBE13_EMAC_OFFSET;
3045 		port_reg_blk_sz = 0x30;
3046 		emac_reg_blk_sz = 0x40;
3047 	} else if (IS_SS_ID_MU(gbe_dev)) {
3048 		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3049 		emac_reg_ofs = GBENU_EMAC_OFFSET;
3050 		port_reg_blk_sz = 0x1000;
3051 		emac_reg_blk_sz = 0x1000;
3052 	} else if (IS_SS_ID_XGBE(gbe_dev)) {
3053 		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3054 		emac_reg_ofs = XGBE10_EMAC_OFFSET;
3055 		port_reg_blk_sz = 0x30;
3056 		emac_reg_blk_sz = 0x40;
3057 	} else {
3058 		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3059 			gbe_dev->ss_version);
3060 		return -EINVAL;
3061 	}
3062 
3063 	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3064 				(port_reg_blk_sz * port_reg_num);
3065 	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3066 				(emac_reg_blk_sz * slave->slave_num);
3067 
3068 	if (IS_SS_ID_VER_14(gbe_dev)) {
3069 		/* Initialize  slave port register offsets */
3070 		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3071 		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3072 		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3073 		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3074 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3075 		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3076 		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3077 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3078 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3079 
3080 		/* Initialize EMAC register offsets */
3081 		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3082 		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3083 		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3084 
3085 	} else if (IS_SS_ID_MU(gbe_dev)) {
3086 		/* Initialize  slave port register offsets */
3087 		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3088 		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3089 		GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3090 		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3091 		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3092 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3093 		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3094 		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3095 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3096 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3097 		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3098 
3099 		/* Initialize EMAC register offsets */
3100 		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3101 		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3102 
3103 	} else if (IS_SS_ID_XGBE(gbe_dev)) {
3104 		/* Initialize  slave port register offsets */
3105 		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3106 		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3107 		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3108 		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3109 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3110 		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3111 		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3112 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3113 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3114 
3115 		/* Initialize EMAC register offsets */
3116 		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3117 		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3118 		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3119 	}
3120 
3121 	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3122 
3123 	init_slave_ts_ctl(slave);
3124 	return 0;
3125 }
3126 
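/* Secondary ports are slaves that are not bound to a network interface.
 * Configure them here and, for MAC-PHY link types, attach their PHYs
 * through a dummy netdev so the link can still be managed.
 */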
3127 static void init_secondary_ports(struct gbe_priv *gbe_dev,
3128 				 struct device_node *node)
3129 {
3130 	struct device *dev = gbe_dev->dev;
3131 	phy_interface_t phy_mode;
3132 	struct gbe_priv **priv;
3133 	struct device_node *port;
3134 	struct gbe_slave *slave;
3135 	bool mac_phy_link = false;
3136 
3137 	for_each_child_of_node(node, port) {
3138 		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3139 		if (!slave) {
3140 			dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
3141 				port->name);
3142 			continue;
3143 		}
3144 
3145 		if (init_slave(gbe_dev, slave, port)) {
3146 			dev_err(dev,
3147 				"Failed to initialize secondary port(%s), skipping...\n",
3148 				port->name);
3149 			devm_kfree(dev, slave);
3150 			continue;
3151 		}
3152 
3153 		if (!IS_SS_ID_2U(gbe_dev))
3154 			gbe_sgmii_config(gbe_dev, slave);
3155 		gbe_port_reset(slave);
3156 		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3157 		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3158 		gbe_dev->num_slaves++;
3159 		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3160 		    (slave->link_interface == XGMII_LINK_MAC_PHY))
3161 			mac_phy_link = true;
3162 
3163 		slave->open = true;
3164 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3165 			of_node_put(port);
3166 			break;
3167 		}
3168 	}
3169 
3170 	/* of_phy_connect() is needed only for MAC-PHY interface */
3171 	if (!mac_phy_link)
3172 		return;
3173 
3174 	/* Allocate dummy netdev device for attaching to phy device */
3175 	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3176 					NET_NAME_UNKNOWN, ether_setup);
3177 	if (!gbe_dev->dummy_ndev) {
3178 		dev_err(dev,
3179 			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3180 		return;
3181 	}
3182 	priv = netdev_priv(gbe_dev->dummy_ndev);
3183 	*priv = gbe_dev;
3184 
3185 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3186 		phy_mode = PHY_INTERFACE_MODE_SGMII;
3187 		slave->phy_port_t = PORT_MII;
3188 	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3189 		phy_mode = PHY_INTERFACE_MODE_RGMII;
3190 		slave->phy_port_t = PORT_MII;
3191 	} else {
3192 		phy_mode = PHY_INTERFACE_MODE_NA;
3193 		slave->phy_port_t = PORT_FIBRE;
3194 	}
3195 
3196 	for_each_sec_slave(slave, gbe_dev) {
3197 		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3198 		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3199 		    (slave->link_interface != XGMII_LINK_MAC_PHY))
3200 			continue;
3201 		slave->phy =
3202 			of_phy_connect(gbe_dev->dummy_ndev,
3203 				       slave->phy_node,
3204 				       gbe_adjust_link_sec_slaves,
3205 				       0, phy_mode);
3206 		if (!slave->phy) {
3207 			dev_err(dev, "phy not found for slave %d\n",
3208 				slave->slave_num);
3209 		} else {
3210 			dev_dbg(dev, "phy found: id is: 0x%s\n",
3211 				phydev_name(slave->phy));
3212 			phy_start(slave->phy);
3213 		}
3214 	}
3215 }
3216 
3217 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3218 {
3219 	struct gbe_slave *slave;
3220 
3221 	while (!list_empty(&gbe_dev->secondary_slaves)) {
3222 		slave = first_sec_slave(gbe_dev);
3223 
3224 		if (slave->phy)
3225 			phy_disconnect(slave->phy);
3226 		list_del(&slave->slave_list);
3227 	}
3228 	if (gbe_dev->dummy_ndev)
3229 		free_netdev(gbe_dev->dummy_ndev);
3230 }
3231 
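/* Map the XGBE subsystem, switch module and SerDes register regions,
 * allocate the statistics arrays and fill in the 10G-specific register
 * offsets and switch parameters.
 */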
3232 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3233 				 struct device_node *node)
3234 {
3235 	struct resource res;
3236 	void __iomem *regs;
3237 	int ret, i;
3238 
3239 	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3240 	if (ret) {
3241 		dev_err(gbe_dev->dev,
3242 			"Can't xlate xgbe of node(%s) ss address at %d\n",
3243 			node->name, XGBE_SS_REG_INDEX);
3244 		return ret;
3245 	}
3246 
3247 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3248 	if (IS_ERR(regs)) {
3249 		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3250 		return PTR_ERR(regs);
3251 	}
3252 	gbe_dev->ss_regs = regs;
3253 
3254 	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3255 	if (ret) {
3256 		dev_err(gbe_dev->dev,
3257 			"Can't xlate xgbe of node(%s) sm address at %d\n",
3258 			node->name, XGBE_SM_REG_INDEX);
3259 		return ret;
3260 	}
3261 
3262 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3263 	if (IS_ERR(regs)) {
3264 		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3265 		return PTR_ERR(regs);
3266 	}
3267 	gbe_dev->switch_regs = regs;
3268 
3269 	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3270 	if (ret) {
3271 		dev_err(gbe_dev->dev,
3272 			"Can't xlate xgbe serdes of node(%s) address at %d\n",
3273 			node->name, XGBE_SERDES_REG_INDEX);
3274 		return ret;
3275 	}
3276 
3277 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3278 	if (IS_ERR(regs)) {
3279 		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3280 		return PTR_ERR(regs);
3281 	}
3282 	gbe_dev->xgbe_serdes_regs = regs;
3283 
3284 	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3285 	gbe_dev->et_stats = xgbe10_et_stats;
3286 	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3287 
3288 	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3289 					 gbe_dev->num_et_stats, sizeof(u64),
3290 					 GFP_KERNEL);
3291 	if (!gbe_dev->hw_stats) {
3292 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3293 		return -ENOMEM;
3294 	}
3295 
3296 	gbe_dev->hw_stats_prev =
3297 		devm_kcalloc(gbe_dev->dev,
3298 			     gbe_dev->num_et_stats, sizeof(u32),
3299 			     GFP_KERNEL);
3300 	if (!gbe_dev->hw_stats_prev) {
3301 		dev_err(gbe_dev->dev,
3302 			"hw_stats_prev memory allocation failed\n");
3303 		return -ENOMEM;
3304 	}
3305 
3306 	gbe_dev->ss_version = XGBE_SS_VERSION_10;
3307 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3308 					XGBE10_SGMII_MODULE_OFFSET;
3309 	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3310 
3311 	for (i = 0; i < gbe_dev->max_num_ports; i++)
3312 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3313 			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3314 
3315 	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3316 	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3317 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3318 	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3319 	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
3320 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3321 
3322 	/* Subsystem registers */
3323 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3324 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3325 
3326 	/* Switch module registers */
3327 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3328 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3329 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3330 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3331 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3332 
3333 	/* Host port registers */
3334 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3335 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3336 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3337 	return 0;
3338 }
3339 
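/* Map the GBE subsystem registers and read the subsystem ID/version from
 * the first register so the correct per-version setup can be selected.
 */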
3340 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3341 				    struct device_node *node)
3342 {
3343 	struct resource res;
3344 	void __iomem *regs;
3345 	int ret;
3346 
3347 	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3348 	if (ret) {
3349 		dev_err(gbe_dev->dev,
3350 			"Can't translate of node(%s) of gbe ss address at %d\n",
3351 			node->name, GBE_SS_REG_INDEX);
3352 		return ret;
3353 	}
3354 
3355 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3356 	if (IS_ERR(regs)) {
3357 		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3358 		return PTR_ERR(regs);
3359 	}
3360 	gbe_dev->ss_regs = regs;
3361 	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3362 	return 0;
3363 }
3364 
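/* Version 1.4 (GBE13) setup: map the SGMII port 3/4 and switch module
 * regions, allocate the statistics arrays and fill in the register
 * offsets and switch parameters for this subsystem.
 */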
3365 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3366 				struct device_node *node)
3367 {
3368 	struct resource res;
3369 	void __iomem *regs;
3370 	int i, ret;
3371 
3372 	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3373 	if (ret) {
3374 		dev_err(gbe_dev->dev,
3375 			"Can't translate of gbe node(%s) address at index %d\n",
3376 			node->name, GBE_SGMII34_REG_INDEX);
3377 		return ret;
3378 	}
3379 
3380 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3381 	if (IS_ERR(regs)) {
3382 		dev_err(gbe_dev->dev,
3383 			"Failed to map gbe sgmii port34 register base\n");
3384 		return PTR_ERR(regs);
3385 	}
3386 	gbe_dev->sgmii_port34_regs = regs;
3387 
3388 	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3389 	if (ret) {
3390 		dev_err(gbe_dev->dev,
3391 			"Can't translate of gbe node(%s) address at index %d\n",
3392 			node->name, GBE_SM_REG_INDEX);
3393 		return ret;
3394 	}
3395 
3396 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3397 	if (IS_ERR(regs)) {
3398 		dev_err(gbe_dev->dev,
3399 			"Failed to map gbe switch module register base\n");
3400 		return PTR_ERR(regs);
3401 	}
3402 	gbe_dev->switch_regs = regs;
3403 
3404 	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3405 	gbe_dev->et_stats = gbe13_et_stats;
3406 	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3407 
3408 	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3409 					 gbe_dev->num_et_stats, sizeof(u64),
3410 					 GFP_KERNEL);
3411 	if (!gbe_dev->hw_stats) {
3412 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3413 		return -ENOMEM;
3414 	}
3415 
3416 	gbe_dev->hw_stats_prev =
3417 		devm_kcalloc(gbe_dev->dev,
3418 			     gbe_dev->num_et_stats, sizeof(u32),
3419 			     GFP_KERNEL);
3420 	if (!gbe_dev->hw_stats_prev) {
3421 		dev_err(gbe_dev->dev,
3422 			"hw_stats_prev memory allocation failed\n");
3423 		return -ENOMEM;
3424 	}
3425 
3426 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3427 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3428 
	/* K2HK has only 2 hw stats modules visible at a time, so
	 * modules 0 & 2 point to one base and
	 * modules 1 & 3 point to the other base
	 */
3433 	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3434 		gbe_dev->hw_stats_regs[i] =
3435 			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3436 			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3437 	}
3438 
3439 	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3440 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3441 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3442 	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3443 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3444 	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3445 
3446 	/* Subsystem registers */
3447 	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3448 
3449 	/* Switch module registers */
3450 	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3451 	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3452 	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3453 	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3454 	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3455 	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3456 
3457 	/* Host port registers */
3458 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3459 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3460 	return 0;
3461 }
3462 
3463 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3464 				struct device_node *node)
3465 {
3466 	struct resource res;
3467 	void __iomem *regs;
3468 	int i, ret;
3469 
3470 	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3471 	gbe_dev->et_stats = gbenu_et_stats;
3472 
3473 	if (IS_SS_ID_MU(gbe_dev))
3474 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3475 			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3476 	else
3477 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3478 					GBENU_ET_STATS_PORT_SIZE;
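	/* Illustration: an NU subsystem with N slave ports ends up with
	 * GBENU_ET_STATS_HOST_SIZE + N * GBENU_ET_STATS_PORT_SIZE ethtool
	 * stats entries, whereas a 2U subsystem carries the host block plus
	 * a single port block.
	 */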
3479 
3480 	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3481 					 gbe_dev->num_et_stats, sizeof(u64),
3482 					 GFP_KERNEL);
3483 	if (!gbe_dev->hw_stats) {
3484 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3485 		return -ENOMEM;
3486 	}
3487 
3488 	gbe_dev->hw_stats_prev =
3489 		devm_kcalloc(gbe_dev->dev,
3490 			     gbe_dev->num_et_stats, sizeof(u32),
3491 			     GFP_KERNEL);
3492 	if (!gbe_dev->hw_stats_prev) {
3493 		dev_err(gbe_dev->dev,
3494 			"hw_stats_prev memory allocation failed\n");
3495 		return -ENOMEM;
3496 	}
3497 
3498 	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3499 	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate address of gbenu node(%s) at index %d\n",
			node->name, GBENU_SM_REG_INDEX);
3503 		return ret;
3504 	}
3505 
3506 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3507 	if (IS_ERR(regs)) {
3508 		dev_err(gbe_dev->dev,
3509 			"Failed to map gbenu switch module register base\n");
3510 		return PTR_ERR(regs);
3511 	}
3512 	gbe_dev->switch_regs = regs;
3513 
3514 	if (!IS_SS_ID_2U(gbe_dev))
3515 		gbe_dev->sgmii_port_regs =
3516 		       gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3517 
	/* Although the SGMII modules are memory mapped as one contiguous
	 * region on GBENU devices, setting sgmii_port34_regs lets the
	 * common SGMII accessors be used unchanged.
	 */
3522 	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3523 				     (2 * GBENU_SGMII_MODULE_SIZE);
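	/* i.e. two module-sized blocks past the first SGMII module,
	 * mirroring the separate port 3/4 region that the GBE13 layout
	 * provides (see set_gbe_ethss14_priv()).
	 */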
3524 
3525 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3526 
3527 	for (i = 0; i < (gbe_dev->max_num_ports); i++)
3528 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3529 			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3530 
3531 	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3532 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3533 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3534 	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3535 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
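	/* e.g. a subsystem probed with 4 slave ports has max_num_ports = 5,
	 * so the enable mask is (1 << 5) - 1 = 0x1f: host port plus every
	 * slave port.
	 */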
3536 
3537 	/* Subsystem registers */
3538 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3539 	/* ok to set for MU, but used by 2U only */
3540 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3541 
3542 	/* Switch module registers */
3543 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3544 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3545 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3546 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3547 
3548 	/* Host port registers */
3549 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3550 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3551 
	/* Needed for NU only; 2U does not use tx_pri_map.
	 * The NU CPPI port 0 TX packet streaming interface has (n-1)*8
	 * egress threads, while 2U has only one such thread.
	 */
3556 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3557 	return 0;
3558 }
3559 
3560 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3561 		     struct device_node *node, void **inst_priv)
3562 {
3563 	struct device_node *interfaces, *interface;
3564 	struct device_node *secondary_ports;
3565 	struct cpsw_ale_params ale_params;
3566 	struct gbe_priv *gbe_dev;
3567 	u32 slave_num;
3568 	int i, ret = 0;
3569 
3570 	if (!node) {
3571 		dev_err(dev, "device tree info unavailable\n");
3572 		return -ENODEV;
3573 	}
3574 
3575 	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3576 	if (!gbe_dev)
3577 		return -ENOMEM;
3578 
3579 	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3580 	    of_device_is_compatible(node, "ti,netcp-gbe")) {
3581 		gbe_dev->max_num_slaves = 4;
3582 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3583 		gbe_dev->max_num_slaves = 8;
3584 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3585 		gbe_dev->max_num_slaves = 1;
3586 		gbe_module.set_rx_mode = gbe_set_rx_mode;
3587 	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3588 		gbe_dev->max_num_slaves = 2;
3589 	} else {
3590 		dev_err(dev, "device tree node for unknown device\n");
3591 		return -EINVAL;
3592 	}
3593 	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
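	/* one extra for the host port alongside the slave ports */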
3594 
3595 	gbe_dev->dev = dev;
3596 	gbe_dev->netcp_device = netcp_device;
3597 	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3598 
3599 	/* init the hw stats lock */
3600 	spin_lock_init(&gbe_dev->hw_stats_lock);
3601 
3602 	if (of_find_property(node, "enable-ale", NULL)) {
3603 		gbe_dev->enable_ale = true;
3604 		dev_info(dev, "ALE enabled\n");
3605 	} else {
3606 		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled\n");
3608 	}
3609 
3610 	ret = of_property_read_u32(node, "tx-queue",
3611 				   &gbe_dev->tx_queue_id);
3612 	if (ret < 0) {
		dev_err(dev, "missing \"tx-queue\" parameter, using default\n");
3614 		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3615 	}
3616 
3617 	ret = of_property_read_string(node, "tx-channel",
3618 				      &gbe_dev->dma_chan_name);
3619 	if (ret < 0) {
3620 		dev_err(dev, "missing \"tx-channel\" parameter\n");
3621 		return -EINVAL;
3622 	}
3623 
3624 	if (!strcmp(node->name, "gbe")) {
3625 		ret = get_gbe_resource_version(gbe_dev, node);
3626 		if (ret)
3627 			return ret;
3628 
3629 		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3630 
3631 		if (IS_SS_ID_VER_14(gbe_dev))
3632 			ret = set_gbe_ethss14_priv(gbe_dev, node);
3633 		else if (IS_SS_ID_MU(gbe_dev))
3634 			ret = set_gbenu_ethss_priv(gbe_dev, node);
3635 		else
3636 			ret = -ENODEV;
3637 
3638 	} else if (!strcmp(node->name, "xgbe")) {
3639 		ret = set_xgbe_ethss10_priv(gbe_dev, node);
3640 		if (ret)
3641 			return ret;
3642 		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3643 					     gbe_dev->ss_regs);
3644 	} else {
3645 		dev_err(dev, "unknown GBE node(%s)\n", node->name);
3646 		ret = -ENODEV;
3647 	}
3648 
3649 	if (ret)
3650 		return ret;
3651 
3652 	interfaces = of_get_child_by_name(node, "interfaces");
3653 	if (!interfaces)
3654 		dev_err(dev, "could not find interfaces\n");
3655 
3656 	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3657 				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3658 	if (ret)
3659 		return ret;
3660 
3661 	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3662 	if (ret)
3663 		return ret;
3664 
3665 	/* Create network interfaces */
3666 	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3667 	for_each_child_of_node(interfaces, interface) {
3668 		ret = of_property_read_u32(interface, "slave-port", &slave_num);
3669 		if (ret) {
3670 			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
3671 				interface->name);
3672 			continue;
3673 		}
3674 		gbe_dev->num_slaves++;
3675 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
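			/* for_each_child_of_node() holds a reference on
			 * 'interface' that is normally dropped on the next
			 * iteration; breaking out early means it must be
			 * dropped explicitly here.
			 */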
3676 			of_node_put(interface);
3677 			break;
3678 		}
3679 	}
3680 	of_node_put(interfaces);
3681 
3682 	if (!gbe_dev->num_slaves)
3683 		dev_warn(dev, "No network interface configured\n");
3684 
3685 	/* Initialize Secondary slave ports */
3686 	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3687 	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
3689 		init_secondary_ports(gbe_dev, secondary_ports);
3690 	of_node_put(secondary_ports);
3691 
3692 	if (!gbe_dev->num_slaves) {
3693 		dev_err(dev,
3694 			"No network interface or secondary ports configured\n");
3695 		ret = -ENODEV;
3696 		goto free_sec_ports;
3697 	}
3698 
3699 	memset(&ale_params, 0, sizeof(ale_params));
3700 	ale_params.dev		= gbe_dev->dev;
3701 	ale_params.ale_regs	= gbe_dev->ale_reg;
3702 	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
3703 	ale_params.ale_entries	= gbe_dev->ale_entries;
3704 	ale_params.ale_ports	= gbe_dev->ale_ports;
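	/* NU-class subsystems use a different ALE register/table layout;
	 * flag that to the common cpsw_ale code and narrow the major
	 * version mask it matches against (per the nu_switch_ale handling
	 * in cpsw_ale).
	 */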
3705 	if (IS_SS_ID_MU(gbe_dev)) {
3706 		ale_params.major_ver_mask = 0x7;
3707 		ale_params.nu_switch_ale = true;
3708 	}
3709 	gbe_dev->ale = cpsw_ale_create(&ale_params);
3710 	if (!gbe_dev->ale) {
3711 		dev_err(gbe_dev->dev, "error initializing ale engine\n");
3712 		ret = -ENODEV;
3713 		goto free_sec_ports;
3714 	} else {
3715 		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3716 	}
3717 
3718 	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
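	/* a cpts_create() failure is only treated as fatal when CPTS
	 * support is actually compiled in
	 */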
3719 	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3720 		ret = PTR_ERR(gbe_dev->cpts);
3721 		goto free_sec_ports;
3722 	}
3723 
3724 	/* initialize host port */
3725 	gbe_init_host_port(gbe_dev);
3726 
3727 	spin_lock_bh(&gbe_dev->hw_stats_lock);
3728 	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3729 		if (IS_SS_ID_VER_14(gbe_dev))
3730 			gbe_reset_mod_stats_ver14(gbe_dev, i);
3731 		else
3732 			gbe_reset_mod_stats(gbe_dev, i);
3733 	}
3734 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
3735 
3736 	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
3737 	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
3738 	add_timer(&gbe_dev->timer);
3739 	*inst_priv = gbe_dev;
3740 	return 0;
3741 
3742 free_sec_ports:
3743 	free_secondary_ports(gbe_dev);
3744 	return ret;
3745 }
3746 
3747 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3748 		      struct device_node *node, void **intf_priv)
3749 {
3750 	struct gbe_priv *gbe_dev = inst_priv;
3751 	struct gbe_intf *gbe_intf;
3752 	int ret;
3753 
3754 	if (!node) {
3755 		dev_err(gbe_dev->dev, "interface node not available\n");
3756 		return -ENODEV;
3757 	}
3758 
3759 	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3760 	if (!gbe_intf)
3761 		return -ENOMEM;
3762 
3763 	gbe_intf->ndev = ndev;
3764 	gbe_intf->dev = gbe_dev->dev;
3765 	gbe_intf->gbe_dev = gbe_dev;
3766 
3767 	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3768 					sizeof(*gbe_intf->slave),
3769 					GFP_KERNEL);
3770 	if (!gbe_intf->slave) {
3771 		ret = -ENOMEM;
3772 		goto fail;
3773 	}
3774 
3775 	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3776 		ret = -ENODEV;
3777 		goto fail;
3778 	}
3779 
3780 	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3781 	ndev->ethtool_ops = &keystone_ethtool_ops;
3782 	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3783 	*intf_priv = gbe_intf;
3784 	return 0;
3785 
3786 fail:
3787 	if (gbe_intf->slave)
3788 		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	devm_kfree(gbe_dev->dev, gbe_intf);
3791 	return ret;
3792 }
3793 
3794 static int gbe_release(void *intf_priv)
3795 {
3796 	struct gbe_intf *gbe_intf = intf_priv;
3797 
3798 	gbe_intf->ndev->ethtool_ops = NULL;
3799 	list_del(&gbe_intf->gbe_intf_list);
3800 	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3801 	devm_kfree(gbe_intf->dev, gbe_intf);
3802 	return 0;
3803 }
3804 
3805 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3806 {
3807 	struct gbe_priv *gbe_dev = inst_priv;
3808 
3809 	del_timer_sync(&gbe_dev->timer);
3810 	cpts_release(gbe_dev->cpts);
3811 	cpsw_ale_stop(gbe_dev->ale);
3812 	netcp_txpipe_close(&gbe_dev->tx_pipe);
3813 	free_secondary_ports(gbe_dev);
3814 
3815 	if (!list_empty(&gbe_dev->gbe_intf_head))
3816 		dev_alert(gbe_dev->dev,
3817 			  "unreleased ethss interfaces present\n");
3818 
3819 	return 0;
3820 }
3821 
3822 static struct netcp_module gbe_module = {
3823 	.name		= GBE_MODULE_NAME,
3824 	.owner		= THIS_MODULE,
3825 	.primary	= true,
3826 	.probe		= gbe_probe,
3827 	.open		= gbe_open,
3828 	.close		= gbe_close,
3829 	.remove		= gbe_remove,
3830 	.attach		= gbe_attach,
3831 	.release	= gbe_release,
3832 	.add_addr	= gbe_add_addr,
3833 	.del_addr	= gbe_del_addr,
3834 	.add_vid	= gbe_add_vid,
3835 	.del_vid	= gbe_del_vid,
3836 	.ioctl		= gbe_ioctl,
3837 };
3838 
3839 static struct netcp_module xgbe_module = {
3840 	.name		= XGBE_MODULE_NAME,
3841 	.owner		= THIS_MODULE,
3842 	.primary	= true,
3843 	.probe		= gbe_probe,
3844 	.open		= gbe_open,
3845 	.close		= gbe_close,
3846 	.remove		= gbe_remove,
3847 	.attach		= gbe_attach,
3848 	.release	= gbe_release,
3849 	.add_addr	= gbe_add_addr,
3850 	.del_addr	= gbe_del_addr,
3851 	.add_vid	= gbe_add_vid,
3852 	.del_vid	= gbe_del_vid,
3853 	.ioctl		= gbe_ioctl,
3854 };
3855 
3856 static int __init keystone_gbe_init(void)
3857 {
3858 	int ret;
3859 
3860 	ret = netcp_register_module(&gbe_module);
3861 	if (ret)
3862 		return ret;
3863 
	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		/* keep module registrations balanced on failure */
		netcp_unregister_module(&gbe_module);
		return ret;
	}
3867 
3868 	return 0;
3869 }
3870 module_init(keystone_gbe_init);
3871 
3872 static void __exit keystone_gbe_exit(void)
3873 {
3874 	netcp_unregister_module(&gbe_module);
3875 	netcp_unregister_module(&xgbe_module);
3876 }
3877 module_exit(keystone_gbe_exit);
3878 
3879 MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SoCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
3882