1 /*
2  * Keystone GBE and XGBE subsystem code
3  *
4  * Copyright (C) 2014 Texas Instruments Incorporated
5  * Authors:	Sandeep Nair <sandeep_n@ti.com>
6  *		Sandeep Paulraj <s-paulraj@ti.com>
7  *		Cyril Chemparathy <cyril@ti.com>
8  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *		Wingman Kwok <w-kwok2@ti.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation version 2.
14  *
15  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16  * kind, whether express or implied; without even the implied warranty
17  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  */
20 
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/of_mdio.h>
24 #include <linux/of_net.h>
25 #include <linux/of_address.h>
26 #include <linux/if_vlan.h>
27 #include <linux/ptp_classify.h>
28 #include <linux/net_tstamp.h>
29 #include <linux/ethtool.h>
30 
31 #include "cpsw.h"
32 #include "cpsw_ale.h"
33 #include "netcp.h"
34 #include "cpts.h"
35 
36 #define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
37 #define NETCP_DRIVER_VERSION		"v1.0"
38 
/* Decode fields of the subsystem ID/version register.
 * Bit layout implied by the masks below: ident[31:16], rtl[15:11],
 * major[10:8], minor[7:0].
 * Arguments are fully parenthesized so the macros expand correctly
 * for any expression (e.g. GBE_IDENT(a | b)).
 */
#define GBE_IDENT(reg)			(((reg) >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
#define GBE_MINOR_VERSION(reg)		((reg) & 0xff)
#define GBE_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)
43 
44 /* 1G Ethernet SS defines */
45 #define GBE_MODULE_NAME			"netcp-gbe"
46 #define GBE_SS_VERSION_14		0x4ed2
47 
48 #define GBE_SS_REG_INDEX		0
49 #define GBE_SGMII34_REG_INDEX		1
50 #define GBE_SM_REG_INDEX		2
51 /* offset relative to base of GBE_SS_REG_INDEX */
52 #define GBE13_SGMII_MODULE_OFFSET	0x100
53 /* offset relative to base of GBE_SM_REG_INDEX */
54 #define GBE13_HOST_PORT_OFFSET		0x34
55 #define GBE13_SLAVE_PORT_OFFSET		0x60
56 #define GBE13_EMAC_OFFSET		0x100
57 #define GBE13_SLAVE_PORT2_OFFSET	0x200
58 #define GBE13_HW_STATS_OFFSET		0x300
59 #define GBE13_CPTS_OFFSET		0x500
60 #define GBE13_ALE_OFFSET		0x600
61 #define GBE13_HOST_PORT_NUM		0
62 #define GBE13_NUM_ALE_ENTRIES		1024
63 
64 /* 1G Ethernet NU SS defines */
65 #define GBENU_MODULE_NAME		"netcp-gbenu"
66 #define GBE_SS_ID_NU			0x4ee6
67 #define GBE_SS_ID_2U			0x4ee8
68 
69 #define IS_SS_ID_MU(d) \
70 	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
71 	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
72 
73 #define IS_SS_ID_NU(d) \
74 	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
75 
76 #define IS_SS_ID_VER_14(d) \
77 	(GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
78 #define IS_SS_ID_2U(d) \
79 	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
80 
81 #define GBENU_SS_REG_INDEX		0
82 #define GBENU_SM_REG_INDEX		1
83 #define GBENU_SGMII_MODULE_OFFSET	0x100
84 #define GBENU_HOST_PORT_OFFSET		0x1000
85 #define GBENU_SLAVE_PORT_OFFSET		0x2000
86 #define GBENU_EMAC_OFFSET		0x2330
87 #define GBENU_HW_STATS_OFFSET		0x1a000
88 #define GBENU_CPTS_OFFSET		0x1d000
89 #define GBENU_ALE_OFFSET		0x1e000
90 #define GBENU_HOST_PORT_NUM		0
91 #define GBENU_SGMII_MODULE_SIZE		0x100
92 
93 /* 10G Ethernet SS defines */
94 #define XGBE_MODULE_NAME		"netcp-xgbe"
95 #define XGBE_SS_VERSION_10		0x4ee4
96 
97 #define XGBE_SS_REG_INDEX		0
98 #define XGBE_SM_REG_INDEX		1
99 #define XGBE_SERDES_REG_INDEX		2
100 
101 /* offset relative to base of XGBE_SS_REG_INDEX */
102 #define XGBE10_SGMII_MODULE_OFFSET	0x100
103 #define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
104 /* offset relative to base of XGBE_SM_REG_INDEX */
105 #define XGBE10_HOST_PORT_OFFSET		0x34
106 #define XGBE10_SLAVE_PORT_OFFSET	0x64
107 #define XGBE10_EMAC_OFFSET		0x400
108 #define XGBE10_CPTS_OFFSET		0x600
109 #define XGBE10_ALE_OFFSET		0x700
110 #define XGBE10_HW_STATS_OFFSET		0x800
111 #define XGBE10_HOST_PORT_NUM		0
112 #define XGBE10_NUM_ALE_ENTRIES		2048
113 
114 #define	GBE_TIMER_INTERVAL			(HZ / 2)
115 
116 /* Soft reset register values */
117 #define SOFT_RESET_MASK				BIT(0)
118 #define SOFT_RESET				BIT(0)
119 #define DEVICE_EMACSL_RESET_POLL_COUNT		100
120 #define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
121 
122 #define MACSL_RX_ENABLE_CSF			BIT(23)
123 #define MACSL_ENABLE_EXT_CTL			BIT(18)
124 #define MACSL_XGMII_ENABLE			BIT(13)
125 #define MACSL_XGIG_MODE				BIT(8)
126 #define MACSL_GIG_MODE				BIT(7)
127 #define MACSL_GMII_ENABLE			BIT(5)
128 #define MACSL_FULLDUPLEX			BIT(0)
129 
130 #define GBE_CTL_P0_ENABLE			BIT(2)
131 #define ETH_SW_CTL_P0_TX_CRC_REMOVE		BIT(13)
132 #define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
133 #define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
134 #define GBE_STATS_CD_SEL			BIT(28)
135 
136 #define GBE_PORT_MASK(x)			(BIT(x) - 1)
137 #define GBE_MASK_NO_PORTS			0
138 
139 #define GBE_DEF_1G_MAC_CONTROL					\
140 		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
141 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
142 
143 #define GBE_DEF_10G_MAC_CONTROL				\
144 		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
145 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
146 
147 #define GBE_STATSA_MODULE			0
148 #define GBE_STATSB_MODULE			1
149 #define GBE_STATSC_MODULE			2
150 #define GBE_STATSD_MODULE			3
151 
152 #define GBENU_STATS0_MODULE			0
153 #define GBENU_STATS1_MODULE			1
154 #define GBENU_STATS2_MODULE			2
155 #define GBENU_STATS3_MODULE			3
156 #define GBENU_STATS4_MODULE			4
157 #define GBENU_STATS5_MODULE			5
158 #define GBENU_STATS6_MODULE			6
159 #define GBENU_STATS7_MODULE			7
160 #define GBENU_STATS8_MODULE			8
161 
162 #define XGBE_STATS0_MODULE			0
163 #define XGBE_STATS1_MODULE			1
164 #define XGBE_STATS2_MODULE			2
165 
166 /* s: 0-based slave_port */
167 #define SGMII_BASE(d, s) \
168 	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
169 
170 #define GBE_TX_QUEUE				648
171 #define	GBE_TXHOOK_ORDER			0
172 #define	GBE_RXHOOK_ORDER			0
173 #define GBE_DEFAULT_ALE_AGEOUT			30
174 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
175 #define SLAVE_LINK_IS_RGMII(s) \
176 	(((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
177 	 ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
178 #define SLAVE_LINK_IS_SGMII(s) \
179 	((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
180 #define NETCP_LINK_STATE_INVALID		-1
181 
182 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
183 		offsetof(struct gbe##_##rb, rn)
184 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
185 		offsetof(struct gbenu##_##rb, rn)
186 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
187 		offsetof(struct xgbe##_##rb, rn)
188 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
189 
190 #define HOST_TX_PRI_MAP_DEFAULT			0x00000000
191 
192 #if IS_ENABLED(CONFIG_TI_CPTS)
193 /* Px_TS_CTL register fields */
194 #define TS_RX_ANX_F_EN				BIT(0)
195 #define TS_RX_VLAN_LT1_EN			BIT(1)
196 #define TS_RX_VLAN_LT2_EN			BIT(2)
197 #define TS_RX_ANX_D_EN				BIT(3)
198 #define TS_TX_ANX_F_EN				BIT(4)
199 #define TS_TX_VLAN_LT1_EN			BIT(5)
200 #define TS_TX_VLAN_LT2_EN			BIT(6)
201 #define TS_TX_ANX_D_EN				BIT(7)
202 #define TS_LT2_EN				BIT(8)
203 #define TS_RX_ANX_E_EN				BIT(9)
204 #define TS_TX_ANX_E_EN				BIT(10)
205 #define TS_MSG_TYPE_EN_SHIFT			16
206 #define TS_MSG_TYPE_EN_MASK			0xffff
207 
208 /* Px_TS_SEQ_LTYPE register fields */
209 #define TS_SEQ_ID_OFS_SHIFT			16
210 #define TS_SEQ_ID_OFS_MASK			0x3f
211 
212 /* Px_TS_CTL_LTYPE2 register fields */
213 #define TS_107					BIT(16)
214 #define TS_129					BIT(17)
215 #define TS_130					BIT(18)
216 #define TS_131					BIT(19)
217 #define TS_132					BIT(20)
218 #define TS_319					BIT(21)
219 #define TS_320					BIT(22)
220 #define TS_TTL_NONZERO				BIT(23)
221 #define TS_UNI_EN				BIT(24)
222 #define TS_UNI_EN_SHIFT				24
223 
224 #define TS_TX_ANX_ALL_EN	 \
225 	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
226 
227 #define TS_RX_ANX_ALL_EN	 \
228 	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
229 
230 #define TS_CTL_DST_PORT				TS_319
231 #define TS_CTL_DST_PORT_SHIFT			21
232 
233 #define TS_CTL_MADDR_ALL	\
234 	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
235 
236 #define TS_CTL_MADDR_SHIFT			16
237 
238 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
239 #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
240 #endif /* CONFIG_TI_CPTS */
241 
/* 10G (XGBE) subsystem-level register layout.
 * Field order mirrors hardware register offsets - do not reorder.
 */
struct xgbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
	u32	control;
};
248 
/* 10G switch-module register layout; accessed via the *_SET_REG_OFS /
 * GBE_REG_ADDR offsetof machinery, so field order must match hardware.
 */
struct xgbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	cppi_thresh;
};
262 
/* 10G slave-port register layout (per external port).
 * ts_* registers carry the CPTS timestamping controls (see Px_TS_*
 * field defines above).
 */
struct xgbe_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
	u32	control;
};
276 
/* 10G host (CPU-facing) port register layout */
struct xgbe_host_port_regs {
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	src_id;
	u32	rx_pri_map;
	u32	rx_maxlen;
};
285 
/* 10G per-slave EMAC (MAC sliver) register layout.
 * __reserved_* / rsvd entries pad over unused register slots.
 */
struct xgbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	em_control;
	u32	__reserved_1;
	u32	tx_gap;
	u32	rsvd[4];
};
300 
/* Hardware statistics counter layout for the 10G host port.
 * __rsvd_* entries pad over counter slots not implemented for the
 * host port, keeping the remaining offsets aligned with hardware.
 */
struct xgbe_host_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	__rsvd_0[3];
	u32	rx_oversized_frames;
	u32	__rsvd_1;
	u32	rx_undersized_frames;
	u32	__rsvd_2;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	__rsvd_3[9];
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};
329 
/* Hardware statistics counter layout for a 10G slave port.
 * Offsets feed the XGBE_STATS*_INFO ethtool tables - do not reorder.
 */
struct xgbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	overrun_type4;
	u32	overrun_type5;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};
368 
/* NU/2U 1G subsystem register layout.  Trailing tags note which
 * subsystem variant (NU or 2U) implements the field.
 */
struct gbenu_ss_regs {
	u32	id_ver;
	u32	synce_count;		/* NU */
	u32	synce_mux;		/* NU */
	u32	control;		/* 2U */
	u32	__rsvd_0[2];		/* 2U */
	u32	rgmii_status;		/* 2U */
	u32	ss_status;		/* 2U */
};
378 
/* NU/2U switch-module register layout (tags as in gbenu_ss_regs) */
struct gbenu_switch_regs {
	u32	id_ver;
	u32	control;
	u32	__rsvd_0[2];
	u32	emcontrol;
	u32	stat_port_en;
	u32	ptype;			/* NU */
	u32	soft_idle;
	u32	thru_rate;		/* NU */
	u32	gap_thresh;		/* NU */
	u32	tx_start_wds;		/* NU */
	u32	eee_prescale;		/* 2U */
	u32	tx_g_oflow_thresh_set;	/* NU */
	u32	tx_g_oflow_thresh_clr;	/* NU */
	u32	tx_g_buf_thresh_set_l;	/* NU */
	u32	tx_g_buf_thresh_set_h;	/* NU */
	u32	tx_g_buf_thresh_clr_l;	/* NU */
	u32	tx_g_buf_thresh_clr_h;	/* NU */
};
398 
/* NU/2U slave-port register layout.  The large __rsvd_3 gap keeps the
 * sa_lo/sa_hi/ts_* group at its hardware offset.
 */
struct gbenu_port_regs {
	u32	__rsvd_0;
	u32	control;
	u32	max_blks;		/* 2U */
	u32	mem_align1;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;		/* NU */
	u32	pri_ctl;		/* 2U */
	u32	rx_pri_map;
	u32	rx_maxlen;
	u32	tx_blks_pri;		/* NU */
	u32	__rsvd_1;
	u32	idle2lpi;		/* 2U */
	u32	lpi2idle;		/* 2U */
	u32	eee_status;		/* 2U */
	u32	__rsvd_2;
	u32	__rsvd_3[176];		/* NU: more to add */
	u32	__rsvd_4[2];
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};
426 
/* NU/2U host-port register layout (tags as in gbenu_ss_regs) */
struct gbenu_host_port_regs {
	u32	__rsvd_0;
	u32	control;
	u32	flow_id_offset;		/* 2U */
	u32	__rsvd_1;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;		/* NU */
	u32	pri_ctl;
	u32	rx_pri_map;
	u32	rx_maxlen;
	u32	tx_blks_pri;		/* NU */
	u32	__rsvd_2;
	u32	idle2lpi;		/* 2U */
	u32	lpi2wake;		/* 2U */
	u32	eee_status;		/* 2U */
	u32	__rsvd_3;
	u32	__rsvd_4[184];		/* NU */
	u32	host_blks_pri;		/* NU */
};
447 
/* NU/2U per-slave EMAC register layout; note rx_pause/tx_pause are
 * separated by reserved gaps on this variant.
 */
struct gbenu_emac_regs {
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	boff_test;
	u32	rx_pause;
	u32	__rsvd_0[11];		/* NU */
	u32	tx_pause;
	u32	__rsvd_1[11];		/* NU */
	u32	em_control;
	u32	tx_gap;
};
460 
/* NU/2U hardware statistics counter layout.
 * Some hw stat regs are applicable to slave port only; this is handled
 * by the gbenu_et_stats tables below.  Also some fields are for SS
 * version NU only and some are for 2U only (see trailing tags).
 */
struct gbenu_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;		/* slave */
	u32	rx_crc_errors;
	u32	rx_align_code_errors;		/* slave */
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;		/* slave */
	u32	rx_undersized_frames;
	u32	rx_fragments;			/* slave */
	u32	ale_drop;
	u32	ale_overrun_drop;
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;		/* slave */
	u32	tx_deferred_frames;		/* slave */
	u32	tx_collision_frames;		/* slave */
	u32	tx_single_coll_frames;		/* slave */
	u32	tx_mult_coll_frames;		/* slave */
	u32	tx_excessive_collisions;	/* slave */
	u32	tx_late_collisions;		/* slave */
	u32	rx_ipg_error;			/* slave 10G only */
	u32	tx_carrier_sense_errors;	/* slave */
	u32	tx_bytes;
	u32	tx_64B_frames;
	u32	tx_65_to_127B_frames;
	u32	tx_128_to_255B_frames;
	u32	tx_256_to_511B_frames;
	u32	tx_512_to_1023B_frames;
	u32	tx_1024B_frames;
	u32	net_bytes;
	u32	rx_bottom_fifo_drop;
	u32	rx_port_mask_drop;
	u32	rx_top_fifo_drop;
	u32	ale_rate_limit_drop;
	u32	ale_vid_ingress_drop;
	u32	ale_da_eq_sa_drop;
	u32	__rsvd_0[3];
	u32	ale_unknown_ucast;
	u32	ale_unknown_ucast_bytes;
	u32	ale_unknown_mcast;
	u32	ale_unknown_mcast_bytes;
	u32	ale_unknown_bcast;
	u32	ale_unknown_bcast_bytes;
	u32	ale_pol_match;
	u32	ale_pol_match_red;		/* NU */
	u32	ale_pol_match_yellow;		/* NU */
	u32	__rsvd_1[44];
	u32	tx_mem_protect_err;
	/* following NU only: per-priority TX frame/byte/drop counters */
	u32	tx_pri0;
	u32	tx_pri1;
	u32	tx_pri2;
	u32	tx_pri3;
	u32	tx_pri4;
	u32	tx_pri5;
	u32	tx_pri6;
	u32	tx_pri7;
	u32	tx_pri0_bcnt;
	u32	tx_pri1_bcnt;
	u32	tx_pri2_bcnt;
	u32	tx_pri3_bcnt;
	u32	tx_pri4_bcnt;
	u32	tx_pri5_bcnt;
	u32	tx_pri6_bcnt;
	u32	tx_pri7_bcnt;
	u32	tx_pri0_drop;
	u32	tx_pri1_drop;
	u32	tx_pri2_drop;
	u32	tx_pri3_drop;
	u32	tx_pri4_drop;
	u32	tx_pri5_drop;
	u32	tx_pri6_drop;
	u32	tx_pri7_drop;
	u32	tx_pri0_drop_bcnt;
	u32	tx_pri1_drop_bcnt;
	u32	tx_pri2_drop_bcnt;
	u32	tx_pri3_drop_bcnt;
	u32	tx_pri4_drop_bcnt;
	u32	tx_pri5_drop_bcnt;
	u32	tx_pri6_drop_bcnt;
	u32	tx_pri7_drop_bcnt;
};
551 
552 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
553 
/* GBE 1G subsystem register layout */
struct gbe_ss_regs {
	u32	id_ver;
	u32	synce_count;
	u32	synce_mux;
};
559 
/* Byte offsets of selected ss registers; filled by the
 * GBE/GBENU/XGBE_SET_REG_OFS macros and used via GBE_REG_ADDR so one
 * code path can serve the differing register layouts.
 */
struct gbe_ss_regs_ofs {
	u16	id_ver;
	u16	control;
	u16	rgmii_status; /* 2U */
};
565 
/* GBE 1G switch-module register layout */
struct gbe_switch_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
};
578 
/* Version-independent offsets into the switch-module registers
 * (see gbe_ss_regs_ofs for how these are filled and used)
 */
struct gbe_switch_regs_ofs {
	u16	id_ver;
	u16	control;
	u16	soft_reset;
	u16	emcontrol;
	u16	stat_port_en;
	u16	ptype;
	u16	flow_control;
};
588 
/* GBE 1G slave-port register layout */
struct gbe_port_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	sa_lo;
	u32	sa_hi;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	ts_ctl_ltype2;
	u32	ts_ctl2;
};
602 
/* Version-independent offsets into the slave-port registers */
struct gbe_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16     rx_pri_map;
	u16	sa_lo;
	u16	sa_hi;
	u16	ts_ctl;
	u16	ts_seq_ltype;
	u16	ts_vlan;
	u16	ts_ctl_ltype2;
	u16	ts_ctl2;
	u16	rx_maxlen;	/* 2U, NU */
};
616 
/* GBE 1G host-port register layout */
struct gbe_host_port_regs {
	u32	src_id;
	u32	port_vlan;
	u32	rx_pri_map;
	u32	rx_maxlen;
};
623 
/* Version-independent offsets into the host-port registers */
struct gbe_host_port_regs_ofs {
	u16	port_vlan;
	u16	tx_pri_map;
	u16	rx_maxlen;
};
629 
/* GBE 1G per-slave EMAC register layout */
struct gbe_emac_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
	u32	rsvd[6];
};
643 
/* Version-independent offsets into the EMAC registers */
struct gbe_emac_regs_ofs {
	u16	mac_control;
	u16	soft_reset;
	u16	rx_maxlen;
};
649 
/* GBE 1G hardware statistics counter layout; offsets feed the
 * GBE_STATS{A,B,C,D}_INFO ethtool tables - do not reorder.
 */
struct gbe_hw_stats {
	u32	rx_good_frames;
	u32	rx_broadcast_frames;
	u32	rx_multicast_frames;
	u32	rx_pause_frames;
	u32	rx_crc_errors;
	u32	rx_align_code_errors;
	u32	rx_oversized_frames;
	u32	rx_jabber_frames;
	u32	rx_undersized_frames;
	u32	rx_fragments;
	u32	__pad_0[2];
	u32	rx_bytes;
	u32	tx_good_frames;
	u32	tx_broadcast_frames;
	u32	tx_multicast_frames;
	u32	tx_pause_frames;
	u32	tx_deferred_frames;
	u32	tx_collision_frames;
	u32	tx_single_coll_frames;
	u32	tx_mult_coll_frames;
	u32	tx_excessive_collisions;
	u32	tx_late_collisions;
	u32	tx_underrun;
	u32	tx_carrier_sense_errors;
	u32	tx_bytes;
	u32	tx_64byte_frames;
	u32	tx_65_to_127byte_frames;
	u32	tx_128_to_255byte_frames;
	u32	tx_256_to_511byte_frames;
	u32	tx_512_to_1023byte_frames;
	u32	tx_1024byte_frames;
	u32	net_bytes;
	u32	rx_sof_overruns;
	u32	rx_mof_overruns;
	u32	rx_dma_overruns;
};
687 
688 #define GBE_MAX_HW_STAT_MODS			9
689 #define GBE_HW_STATS_REG_MAP_SZ			0x100
690 
/* Per-slave CPTS timestamping configuration; presumably programmed
 * into the port ts_ctl/ts_ctl_ltype2 registers (see the TS_* field
 * defines above) - confirm against the timestamp config code.
 */
struct ts_ctl {
	int     uni;		/* unicast PTP enable (cf. TS_UNI_EN) */
	u8      dst_port_map;	/* cf. TS_CTL_DST_PORT */
	u8      maddr_map;	/* cf. TS_CTL_MADDR_ALL */
	u8      ts_mcast_type;
};
697 
/* Per-slave-port state: mapped register windows, their per-version
 * offsets, PHY/link bookkeeping and timestamping config.
 */
struct gbe_slave {
	void __iomem			*port_regs;
	void __iomem			*emac_regs;
	struct gbe_port_regs_ofs	port_regs_ofs;
	struct gbe_emac_regs_ofs	emac_regs_ofs;
	int				slave_num; /* 0 based logical number */
	int				port_num;  /* actual port number */
	atomic_t			link_state; /* NETCP_LINK_STATE_INVALID when unknown */
	bool				open;
	struct phy_device		*phy;
	u32				link_interface; /* SGMII/RGMII/XGMII_LINK_* */
	u32				mac_control;
	u8				phy_port_t;
	struct device_node		*node;
	struct device_node		*phy_node;
	struct ts_ctl                   ts_ctl;
	struct list_head		slave_list;
};
716 
/* Per-subsystem driver state shared by all interfaces/slaves of one
 * GBE/GBENU/XGBE instance.
 */
struct gbe_priv {
	struct device			*dev;
	struct netcp_device		*netcp_device;
	struct timer_list		timer;
	u32				num_slaves;
	u32				ale_entries;
	u32				ale_ports;
	bool				enable_ale;
	u8				max_num_slaves;
	u8				max_num_ports; /* max_num_slaves + 1 */
	u8				num_stats_mods;
	struct netcp_tx_pipe		tx_pipe;

	int				host_port;
	u32				rx_packet_max;
	u32				ss_version; /* decoded via GBE_IDENT()/IS_SS_ID_*() */
	u32				stats_en_mask;

	/* Mapped register windows; which ones are valid depends on the
	 * subsystem variant (e.g. xgbe_serdes_regs is XGBE-only).
	 */
	void __iomem			*ss_regs;
	void __iomem			*switch_regs;
	void __iomem			*host_port_regs;
	void __iomem			*ale_reg;
	void __iomem                    *cpts_reg;
	void __iomem			*sgmii_port_regs;
	void __iomem			*sgmii_port34_regs;
	void __iomem			*xgbe_serdes_regs;
	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	struct gbe_ss_regs_ofs		ss_regs_ofs;
	struct gbe_switch_regs_ofs	switch_regs_ofs;
	struct gbe_host_port_regs_ofs	host_port_regs_ofs;

	struct cpsw_ale			*ale;
	unsigned int			tx_queue_id;
	const char			*dma_chan_name;

	struct list_head		gbe_intf_head;
	struct list_head		secondary_slaves;
	struct net_device		*dummy_ndev;

	/* Accumulated (64-bit) and last-read raw hw stats snapshots,
	 * indexed per the et_stats table below.
	 */
	u64				*hw_stats;
	u32				*hw_stats_prev;
	const struct netcp_ethtool_stat *et_stats;
	int				num_et_stats;
	/*  Lock for updating the hwstats */
	spinlock_t			hw_stats_lock;

	int                             cpts_registered;
	struct cpts                     *cpts;
	int				rx_ts_enabled;
	int				tx_ts_enabled;
};
769 
/* Per-network-interface glue binding a net_device and its netcp TX
 * pipe to one slave port; linked on gbe_priv.gbe_intf_head.
 */
struct gbe_intf {
	struct net_device	*ndev;
	struct device		*dev;
	struct gbe_priv		*gbe_dev;
	struct netcp_tx_pipe	tx_pipe;
	struct gbe_slave	*slave;
	struct list_head	gbe_intf_list;
	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};
779 
780 static struct netcp_module gbe_module;
781 static struct netcp_module xgbe_module;
782 
783 /* Statistic management */
/* One ethtool statistic descriptor (see GBE_STATS*_INFO and
 * GBENU_STATS_* initializer macros below).
 */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];	/* ethtool display string */
	int type;			/* hw stats module index */
	u32 size;			/* size of the counter field */
	int offset;			/* offset within the hw stats struct */
};
790 
791 #define GBE_STATSA_INFO(field)						\
792 {									\
793 	"GBE_A:"#field, GBE_STATSA_MODULE,				\
794 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
795 	offsetof(struct gbe_hw_stats, field)				\
796 }
797 
798 #define GBE_STATSB_INFO(field)						\
799 {									\
800 	"GBE_B:"#field, GBE_STATSB_MODULE,				\
801 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
802 	offsetof(struct gbe_hw_stats, field)				\
803 }
804 
805 #define GBE_STATSC_INFO(field)						\
806 {									\
807 	"GBE_C:"#field, GBE_STATSC_MODULE,				\
808 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
809 	offsetof(struct gbe_hw_stats, field)				\
810 }
811 
812 #define GBE_STATSD_INFO(field)						\
813 {									\
814 	"GBE_D:"#field, GBE_STATSD_MODULE,				\
815 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
816 	offsetof(struct gbe_hw_stats, field)				\
817 }
818 
/* ethtool statistics table for the GBE13 (ss version 1.4) subsystem:
 * the same counter set repeated for stats modules A-D.  Entry order
 * defines the ethtool string/value pairing - do not reorder.
 */
static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	GBE_STATSA_INFO(rx_good_frames),
	GBE_STATSA_INFO(rx_broadcast_frames),
	GBE_STATSA_INFO(rx_multicast_frames),
	GBE_STATSA_INFO(rx_pause_frames),
	GBE_STATSA_INFO(rx_crc_errors),
	GBE_STATSA_INFO(rx_align_code_errors),
	GBE_STATSA_INFO(rx_oversized_frames),
	GBE_STATSA_INFO(rx_jabber_frames),
	GBE_STATSA_INFO(rx_undersized_frames),
	GBE_STATSA_INFO(rx_fragments),
	GBE_STATSA_INFO(rx_bytes),
	GBE_STATSA_INFO(tx_good_frames),
	GBE_STATSA_INFO(tx_broadcast_frames),
	GBE_STATSA_INFO(tx_multicast_frames),
	GBE_STATSA_INFO(tx_pause_frames),
	GBE_STATSA_INFO(tx_deferred_frames),
	GBE_STATSA_INFO(tx_collision_frames),
	GBE_STATSA_INFO(tx_single_coll_frames),
	GBE_STATSA_INFO(tx_mult_coll_frames),
	GBE_STATSA_INFO(tx_excessive_collisions),
	GBE_STATSA_INFO(tx_late_collisions),
	GBE_STATSA_INFO(tx_underrun),
	GBE_STATSA_INFO(tx_carrier_sense_errors),
	GBE_STATSA_INFO(tx_bytes),
	GBE_STATSA_INFO(tx_64byte_frames),
	GBE_STATSA_INFO(tx_65_to_127byte_frames),
	GBE_STATSA_INFO(tx_128_to_255byte_frames),
	GBE_STATSA_INFO(tx_256_to_511byte_frames),
	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
	GBE_STATSA_INFO(tx_1024byte_frames),
	GBE_STATSA_INFO(net_bytes),
	GBE_STATSA_INFO(rx_sof_overruns),
	GBE_STATSA_INFO(rx_mof_overruns),
	GBE_STATSA_INFO(rx_dma_overruns),
	/* GBE module B */
	GBE_STATSB_INFO(rx_good_frames),
	GBE_STATSB_INFO(rx_broadcast_frames),
	GBE_STATSB_INFO(rx_multicast_frames),
	GBE_STATSB_INFO(rx_pause_frames),
	GBE_STATSB_INFO(rx_crc_errors),
	GBE_STATSB_INFO(rx_align_code_errors),
	GBE_STATSB_INFO(rx_oversized_frames),
	GBE_STATSB_INFO(rx_jabber_frames),
	GBE_STATSB_INFO(rx_undersized_frames),
	GBE_STATSB_INFO(rx_fragments),
	GBE_STATSB_INFO(rx_bytes),
	GBE_STATSB_INFO(tx_good_frames),
	GBE_STATSB_INFO(tx_broadcast_frames),
	GBE_STATSB_INFO(tx_multicast_frames),
	GBE_STATSB_INFO(tx_pause_frames),
	GBE_STATSB_INFO(tx_deferred_frames),
	GBE_STATSB_INFO(tx_collision_frames),
	GBE_STATSB_INFO(tx_single_coll_frames),
	GBE_STATSB_INFO(tx_mult_coll_frames),
	GBE_STATSB_INFO(tx_excessive_collisions),
	GBE_STATSB_INFO(tx_late_collisions),
	GBE_STATSB_INFO(tx_underrun),
	GBE_STATSB_INFO(tx_carrier_sense_errors),
	GBE_STATSB_INFO(tx_bytes),
	GBE_STATSB_INFO(tx_64byte_frames),
	GBE_STATSB_INFO(tx_65_to_127byte_frames),
	GBE_STATSB_INFO(tx_128_to_255byte_frames),
	GBE_STATSB_INFO(tx_256_to_511byte_frames),
	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
	GBE_STATSB_INFO(tx_1024byte_frames),
	GBE_STATSB_INFO(net_bytes),
	GBE_STATSB_INFO(rx_sof_overruns),
	GBE_STATSB_INFO(rx_mof_overruns),
	GBE_STATSB_INFO(rx_dma_overruns),
	/* GBE module C */
	GBE_STATSC_INFO(rx_good_frames),
	GBE_STATSC_INFO(rx_broadcast_frames),
	GBE_STATSC_INFO(rx_multicast_frames),
	GBE_STATSC_INFO(rx_pause_frames),
	GBE_STATSC_INFO(rx_crc_errors),
	GBE_STATSC_INFO(rx_align_code_errors),
	GBE_STATSC_INFO(rx_oversized_frames),
	GBE_STATSC_INFO(rx_jabber_frames),
	GBE_STATSC_INFO(rx_undersized_frames),
	GBE_STATSC_INFO(rx_fragments),
	GBE_STATSC_INFO(rx_bytes),
	GBE_STATSC_INFO(tx_good_frames),
	GBE_STATSC_INFO(tx_broadcast_frames),
	GBE_STATSC_INFO(tx_multicast_frames),
	GBE_STATSC_INFO(tx_pause_frames),
	GBE_STATSC_INFO(tx_deferred_frames),
	GBE_STATSC_INFO(tx_collision_frames),
	GBE_STATSC_INFO(tx_single_coll_frames),
	GBE_STATSC_INFO(tx_mult_coll_frames),
	GBE_STATSC_INFO(tx_excessive_collisions),
	GBE_STATSC_INFO(tx_late_collisions),
	GBE_STATSC_INFO(tx_underrun),
	GBE_STATSC_INFO(tx_carrier_sense_errors),
	GBE_STATSC_INFO(tx_bytes),
	GBE_STATSC_INFO(tx_64byte_frames),
	GBE_STATSC_INFO(tx_65_to_127byte_frames),
	GBE_STATSC_INFO(tx_128_to_255byte_frames),
	GBE_STATSC_INFO(tx_256_to_511byte_frames),
	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
	GBE_STATSC_INFO(tx_1024byte_frames),
	GBE_STATSC_INFO(net_bytes),
	GBE_STATSC_INFO(rx_sof_overruns),
	GBE_STATSC_INFO(rx_mof_overruns),
	GBE_STATSC_INFO(rx_dma_overruns),
	/* GBE module D */
	GBE_STATSD_INFO(rx_good_frames),
	GBE_STATSD_INFO(rx_broadcast_frames),
	GBE_STATSD_INFO(rx_multicast_frames),
	GBE_STATSD_INFO(rx_pause_frames),
	GBE_STATSD_INFO(rx_crc_errors),
	GBE_STATSD_INFO(rx_align_code_errors),
	GBE_STATSD_INFO(rx_oversized_frames),
	GBE_STATSD_INFO(rx_jabber_frames),
	GBE_STATSD_INFO(rx_undersized_frames),
	GBE_STATSD_INFO(rx_fragments),
	GBE_STATSD_INFO(rx_bytes),
	GBE_STATSD_INFO(tx_good_frames),
	GBE_STATSD_INFO(tx_broadcast_frames),
	GBE_STATSD_INFO(tx_multicast_frames),
	GBE_STATSD_INFO(tx_pause_frames),
	GBE_STATSD_INFO(tx_deferred_frames),
	GBE_STATSD_INFO(tx_collision_frames),
	GBE_STATSD_INFO(tx_single_coll_frames),
	GBE_STATSD_INFO(tx_mult_coll_frames),
	GBE_STATSD_INFO(tx_excessive_collisions),
	GBE_STATSD_INFO(tx_late_collisions),
	GBE_STATSD_INFO(tx_underrun),
	GBE_STATSD_INFO(tx_carrier_sense_errors),
	GBE_STATSD_INFO(tx_bytes),
	GBE_STATSD_INFO(tx_64byte_frames),
	GBE_STATSD_INFO(tx_65_to_127byte_frames),
	GBE_STATSD_INFO(tx_128_to_255byte_frames),
	GBE_STATSD_INFO(tx_256_to_511byte_frames),
	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
	GBE_STATSD_INFO(tx_1024byte_frames),
	GBE_STATSD_INFO(net_bytes),
	GBE_STATSD_INFO(rx_sof_overruns),
	GBE_STATSD_INFO(rx_mof_overruns),
	GBE_STATSD_INFO(rx_dma_overruns),
};
961 
962 /* This is the size of entries in GBENU_STATS_HOST */
963 #define GBENU_ET_STATS_HOST_SIZE	52
964 
965 #define GBENU_STATS_HOST(field)					\
966 {								\
967 	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
968 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
969 	offsetof(struct gbenu_hw_stats, field)			\
970 }
971 
972 /* This is the size of entries in GBENU_STATS_PORT */
973 #define GBENU_ET_STATS_PORT_SIZE	65
974 
975 #define GBENU_STATS_P1(field)					\
976 {								\
977 	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
978 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
979 	offsetof(struct gbenu_hw_stats, field)			\
980 }
981 
982 #define GBENU_STATS_P2(field)					\
983 {								\
984 	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
985 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
986 	offsetof(struct gbenu_hw_stats, field)			\
987 }
988 
989 #define GBENU_STATS_P3(field)					\
990 {								\
991 	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
992 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
993 	offsetof(struct gbenu_hw_stats, field)			\
994 }
995 
996 #define GBENU_STATS_P4(field)					\
997 {								\
998 	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
999 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1000 	offsetof(struct gbenu_hw_stats, field)			\
1001 }
1002 
1003 #define GBENU_STATS_P5(field)					\
1004 {								\
1005 	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
1006 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1007 	offsetof(struct gbenu_hw_stats, field)			\
1008 }
1009 
1010 #define GBENU_STATS_P6(field)					\
1011 {								\
1012 	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
1013 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1014 	offsetof(struct gbenu_hw_stats, field)			\
1015 }
1016 
1017 #define GBENU_STATS_P7(field)					\
1018 {								\
1019 	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
1020 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1021 	offsetof(struct gbenu_hw_stats, field)			\
1022 }
1023 
1024 #define GBENU_STATS_P8(field)					\
1025 {								\
1026 	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
1027 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
1028 	offsetof(struct gbenu_hw_stats, field)			\
1029 }
1030 
/* ethtool statistics table for the GBENU (NU switch) subsystems: one
 * entry per hardware counter, giving its display name, owning stats
 * module and its size/offset within struct gbenu_hw_stats.  The host
 * (CPPI) port section is followed by slave ports 1-8; every slave port
 * exposes the same counter layout.
 */
static const struct netcp_ethtool_stat gbenu_et_stats[] = {
	/* GBENU Host Module */
	GBENU_STATS_HOST(rx_good_frames),
	GBENU_STATS_HOST(rx_broadcast_frames),
	GBENU_STATS_HOST(rx_multicast_frames),
	GBENU_STATS_HOST(rx_crc_errors),
	GBENU_STATS_HOST(rx_oversized_frames),
	GBENU_STATS_HOST(rx_undersized_frames),
	GBENU_STATS_HOST(ale_drop),
	GBENU_STATS_HOST(ale_overrun_drop),
	GBENU_STATS_HOST(rx_bytes),
	GBENU_STATS_HOST(tx_good_frames),
	GBENU_STATS_HOST(tx_broadcast_frames),
	GBENU_STATS_HOST(tx_multicast_frames),
	GBENU_STATS_HOST(tx_bytes),
	GBENU_STATS_HOST(tx_64B_frames),
	GBENU_STATS_HOST(tx_65_to_127B_frames),
	GBENU_STATS_HOST(tx_128_to_255B_frames),
	GBENU_STATS_HOST(tx_256_to_511B_frames),
	GBENU_STATS_HOST(tx_512_to_1023B_frames),
	GBENU_STATS_HOST(tx_1024B_frames),
	GBENU_STATS_HOST(net_bytes),
	GBENU_STATS_HOST(rx_bottom_fifo_drop),
	GBENU_STATS_HOST(rx_port_mask_drop),
	GBENU_STATS_HOST(rx_top_fifo_drop),
	GBENU_STATS_HOST(ale_rate_limit_drop),
	GBENU_STATS_HOST(ale_vid_ingress_drop),
	GBENU_STATS_HOST(ale_da_eq_sa_drop),
	GBENU_STATS_HOST(ale_unknown_ucast),
	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
	GBENU_STATS_HOST(ale_unknown_mcast),
	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
	GBENU_STATS_HOST(ale_unknown_bcast),
	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
	GBENU_STATS_HOST(ale_pol_match),
	GBENU_STATS_HOST(ale_pol_match_red),
	GBENU_STATS_HOST(ale_pol_match_yellow),
	GBENU_STATS_HOST(tx_mem_protect_err),
	GBENU_STATS_HOST(tx_pri0_drop),
	GBENU_STATS_HOST(tx_pri1_drop),
	GBENU_STATS_HOST(tx_pri2_drop),
	GBENU_STATS_HOST(tx_pri3_drop),
	GBENU_STATS_HOST(tx_pri4_drop),
	GBENU_STATS_HOST(tx_pri5_drop),
	GBENU_STATS_HOST(tx_pri6_drop),
	GBENU_STATS_HOST(tx_pri7_drop),
	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
	/* GBENU Module 1 */
	GBENU_STATS_P1(rx_good_frames),
	GBENU_STATS_P1(rx_broadcast_frames),
	GBENU_STATS_P1(rx_multicast_frames),
	GBENU_STATS_P1(rx_pause_frames),
	GBENU_STATS_P1(rx_crc_errors),
	GBENU_STATS_P1(rx_align_code_errors),
	GBENU_STATS_P1(rx_oversized_frames),
	GBENU_STATS_P1(rx_jabber_frames),
	GBENU_STATS_P1(rx_undersized_frames),
	GBENU_STATS_P1(rx_fragments),
	GBENU_STATS_P1(ale_drop),
	GBENU_STATS_P1(ale_overrun_drop),
	GBENU_STATS_P1(rx_bytes),
	GBENU_STATS_P1(tx_good_frames),
	GBENU_STATS_P1(tx_broadcast_frames),
	GBENU_STATS_P1(tx_multicast_frames),
	GBENU_STATS_P1(tx_pause_frames),
	GBENU_STATS_P1(tx_deferred_frames),
	GBENU_STATS_P1(tx_collision_frames),
	GBENU_STATS_P1(tx_single_coll_frames),
	GBENU_STATS_P1(tx_mult_coll_frames),
	GBENU_STATS_P1(tx_excessive_collisions),
	GBENU_STATS_P1(tx_late_collisions),
	GBENU_STATS_P1(rx_ipg_error),
	GBENU_STATS_P1(tx_carrier_sense_errors),
	GBENU_STATS_P1(tx_bytes),
	GBENU_STATS_P1(tx_64B_frames),
	GBENU_STATS_P1(tx_65_to_127B_frames),
	GBENU_STATS_P1(tx_128_to_255B_frames),
	GBENU_STATS_P1(tx_256_to_511B_frames),
	GBENU_STATS_P1(tx_512_to_1023B_frames),
	GBENU_STATS_P1(tx_1024B_frames),
	GBENU_STATS_P1(net_bytes),
	GBENU_STATS_P1(rx_bottom_fifo_drop),
	GBENU_STATS_P1(rx_port_mask_drop),
	GBENU_STATS_P1(rx_top_fifo_drop),
	GBENU_STATS_P1(ale_rate_limit_drop),
	GBENU_STATS_P1(ale_vid_ingress_drop),
	GBENU_STATS_P1(ale_da_eq_sa_drop),
	GBENU_STATS_P1(ale_unknown_ucast),
	GBENU_STATS_P1(ale_unknown_ucast_bytes),
	GBENU_STATS_P1(ale_unknown_mcast),
	GBENU_STATS_P1(ale_unknown_mcast_bytes),
	GBENU_STATS_P1(ale_unknown_bcast),
	GBENU_STATS_P1(ale_unknown_bcast_bytes),
	GBENU_STATS_P1(ale_pol_match),
	GBENU_STATS_P1(ale_pol_match_red),
	GBENU_STATS_P1(ale_pol_match_yellow),
	GBENU_STATS_P1(tx_mem_protect_err),
	GBENU_STATS_P1(tx_pri0_drop),
	GBENU_STATS_P1(tx_pri1_drop),
	GBENU_STATS_P1(tx_pri2_drop),
	GBENU_STATS_P1(tx_pri3_drop),
	GBENU_STATS_P1(tx_pri4_drop),
	GBENU_STATS_P1(tx_pri5_drop),
	GBENU_STATS_P1(tx_pri6_drop),
	GBENU_STATS_P1(tx_pri7_drop),
	GBENU_STATS_P1(tx_pri0_drop_bcnt),
	GBENU_STATS_P1(tx_pri1_drop_bcnt),
	GBENU_STATS_P1(tx_pri2_drop_bcnt),
	GBENU_STATS_P1(tx_pri3_drop_bcnt),
	GBENU_STATS_P1(tx_pri4_drop_bcnt),
	GBENU_STATS_P1(tx_pri5_drop_bcnt),
	GBENU_STATS_P1(tx_pri6_drop_bcnt),
	GBENU_STATS_P1(tx_pri7_drop_bcnt),
	/* GBENU Module 2 */
	GBENU_STATS_P2(rx_good_frames),
	GBENU_STATS_P2(rx_broadcast_frames),
	GBENU_STATS_P2(rx_multicast_frames),
	GBENU_STATS_P2(rx_pause_frames),
	GBENU_STATS_P2(rx_crc_errors),
	GBENU_STATS_P2(rx_align_code_errors),
	GBENU_STATS_P2(rx_oversized_frames),
	GBENU_STATS_P2(rx_jabber_frames),
	GBENU_STATS_P2(rx_undersized_frames),
	GBENU_STATS_P2(rx_fragments),
	GBENU_STATS_P2(ale_drop),
	GBENU_STATS_P2(ale_overrun_drop),
	GBENU_STATS_P2(rx_bytes),
	GBENU_STATS_P2(tx_good_frames),
	GBENU_STATS_P2(tx_broadcast_frames),
	GBENU_STATS_P2(tx_multicast_frames),
	GBENU_STATS_P2(tx_pause_frames),
	GBENU_STATS_P2(tx_deferred_frames),
	GBENU_STATS_P2(tx_collision_frames),
	GBENU_STATS_P2(tx_single_coll_frames),
	GBENU_STATS_P2(tx_mult_coll_frames),
	GBENU_STATS_P2(tx_excessive_collisions),
	GBENU_STATS_P2(tx_late_collisions),
	GBENU_STATS_P2(rx_ipg_error),
	GBENU_STATS_P2(tx_carrier_sense_errors),
	GBENU_STATS_P2(tx_bytes),
	GBENU_STATS_P2(tx_64B_frames),
	GBENU_STATS_P2(tx_65_to_127B_frames),
	GBENU_STATS_P2(tx_128_to_255B_frames),
	GBENU_STATS_P2(tx_256_to_511B_frames),
	GBENU_STATS_P2(tx_512_to_1023B_frames),
	GBENU_STATS_P2(tx_1024B_frames),
	GBENU_STATS_P2(net_bytes),
	GBENU_STATS_P2(rx_bottom_fifo_drop),
	GBENU_STATS_P2(rx_port_mask_drop),
	GBENU_STATS_P2(rx_top_fifo_drop),
	GBENU_STATS_P2(ale_rate_limit_drop),
	GBENU_STATS_P2(ale_vid_ingress_drop),
	GBENU_STATS_P2(ale_da_eq_sa_drop),
	GBENU_STATS_P2(ale_unknown_ucast),
	GBENU_STATS_P2(ale_unknown_ucast_bytes),
	GBENU_STATS_P2(ale_unknown_mcast),
	GBENU_STATS_P2(ale_unknown_mcast_bytes),
	GBENU_STATS_P2(ale_unknown_bcast),
	GBENU_STATS_P2(ale_unknown_bcast_bytes),
	GBENU_STATS_P2(ale_pol_match),
	GBENU_STATS_P2(ale_pol_match_red),
	GBENU_STATS_P2(ale_pol_match_yellow),
	GBENU_STATS_P2(tx_mem_protect_err),
	GBENU_STATS_P2(tx_pri0_drop),
	GBENU_STATS_P2(tx_pri1_drop),
	GBENU_STATS_P2(tx_pri2_drop),
	GBENU_STATS_P2(tx_pri3_drop),
	GBENU_STATS_P2(tx_pri4_drop),
	GBENU_STATS_P2(tx_pri5_drop),
	GBENU_STATS_P2(tx_pri6_drop),
	GBENU_STATS_P2(tx_pri7_drop),
	GBENU_STATS_P2(tx_pri0_drop_bcnt),
	GBENU_STATS_P2(tx_pri1_drop_bcnt),
	GBENU_STATS_P2(tx_pri2_drop_bcnt),
	GBENU_STATS_P2(tx_pri3_drop_bcnt),
	GBENU_STATS_P2(tx_pri4_drop_bcnt),
	GBENU_STATS_P2(tx_pri5_drop_bcnt),
	GBENU_STATS_P2(tx_pri6_drop_bcnt),
	GBENU_STATS_P2(tx_pri7_drop_bcnt),
	/* GBENU Module 3 */
	GBENU_STATS_P3(rx_good_frames),
	GBENU_STATS_P3(rx_broadcast_frames),
	GBENU_STATS_P3(rx_multicast_frames),
	GBENU_STATS_P3(rx_pause_frames),
	GBENU_STATS_P3(rx_crc_errors),
	GBENU_STATS_P3(rx_align_code_errors),
	GBENU_STATS_P3(rx_oversized_frames),
	GBENU_STATS_P3(rx_jabber_frames),
	GBENU_STATS_P3(rx_undersized_frames),
	GBENU_STATS_P3(rx_fragments),
	GBENU_STATS_P3(ale_drop),
	GBENU_STATS_P3(ale_overrun_drop),
	GBENU_STATS_P3(rx_bytes),
	GBENU_STATS_P3(tx_good_frames),
	GBENU_STATS_P3(tx_broadcast_frames),
	GBENU_STATS_P3(tx_multicast_frames),
	GBENU_STATS_P3(tx_pause_frames),
	GBENU_STATS_P3(tx_deferred_frames),
	GBENU_STATS_P3(tx_collision_frames),
	GBENU_STATS_P3(tx_single_coll_frames),
	GBENU_STATS_P3(tx_mult_coll_frames),
	GBENU_STATS_P3(tx_excessive_collisions),
	GBENU_STATS_P3(tx_late_collisions),
	GBENU_STATS_P3(rx_ipg_error),
	GBENU_STATS_P3(tx_carrier_sense_errors),
	GBENU_STATS_P3(tx_bytes),
	GBENU_STATS_P3(tx_64B_frames),
	GBENU_STATS_P3(tx_65_to_127B_frames),
	GBENU_STATS_P3(tx_128_to_255B_frames),
	GBENU_STATS_P3(tx_256_to_511B_frames),
	GBENU_STATS_P3(tx_512_to_1023B_frames),
	GBENU_STATS_P3(tx_1024B_frames),
	GBENU_STATS_P3(net_bytes),
	GBENU_STATS_P3(rx_bottom_fifo_drop),
	GBENU_STATS_P3(rx_port_mask_drop),
	GBENU_STATS_P3(rx_top_fifo_drop),
	GBENU_STATS_P3(ale_rate_limit_drop),
	GBENU_STATS_P3(ale_vid_ingress_drop),
	GBENU_STATS_P3(ale_da_eq_sa_drop),
	GBENU_STATS_P3(ale_unknown_ucast),
	GBENU_STATS_P3(ale_unknown_ucast_bytes),
	GBENU_STATS_P3(ale_unknown_mcast),
	GBENU_STATS_P3(ale_unknown_mcast_bytes),
	GBENU_STATS_P3(ale_unknown_bcast),
	GBENU_STATS_P3(ale_unknown_bcast_bytes),
	GBENU_STATS_P3(ale_pol_match),
	GBENU_STATS_P3(ale_pol_match_red),
	GBENU_STATS_P3(ale_pol_match_yellow),
	GBENU_STATS_P3(tx_mem_protect_err),
	GBENU_STATS_P3(tx_pri0_drop),
	GBENU_STATS_P3(tx_pri1_drop),
	GBENU_STATS_P3(tx_pri2_drop),
	GBENU_STATS_P3(tx_pri3_drop),
	GBENU_STATS_P3(tx_pri4_drop),
	GBENU_STATS_P3(tx_pri5_drop),
	GBENU_STATS_P3(tx_pri6_drop),
	GBENU_STATS_P3(tx_pri7_drop),
	GBENU_STATS_P3(tx_pri0_drop_bcnt),
	GBENU_STATS_P3(tx_pri1_drop_bcnt),
	GBENU_STATS_P3(tx_pri2_drop_bcnt),
	GBENU_STATS_P3(tx_pri3_drop_bcnt),
	GBENU_STATS_P3(tx_pri4_drop_bcnt),
	GBENU_STATS_P3(tx_pri5_drop_bcnt),
	GBENU_STATS_P3(tx_pri6_drop_bcnt),
	GBENU_STATS_P3(tx_pri7_drop_bcnt),
	/* GBENU Module 4 */
	GBENU_STATS_P4(rx_good_frames),
	GBENU_STATS_P4(rx_broadcast_frames),
	GBENU_STATS_P4(rx_multicast_frames),
	GBENU_STATS_P4(rx_pause_frames),
	GBENU_STATS_P4(rx_crc_errors),
	GBENU_STATS_P4(rx_align_code_errors),
	GBENU_STATS_P4(rx_oversized_frames),
	GBENU_STATS_P4(rx_jabber_frames),
	GBENU_STATS_P4(rx_undersized_frames),
	GBENU_STATS_P4(rx_fragments),
	GBENU_STATS_P4(ale_drop),
	GBENU_STATS_P4(ale_overrun_drop),
	GBENU_STATS_P4(rx_bytes),
	GBENU_STATS_P4(tx_good_frames),
	GBENU_STATS_P4(tx_broadcast_frames),
	GBENU_STATS_P4(tx_multicast_frames),
	GBENU_STATS_P4(tx_pause_frames),
	GBENU_STATS_P4(tx_deferred_frames),
	GBENU_STATS_P4(tx_collision_frames),
	GBENU_STATS_P4(tx_single_coll_frames),
	GBENU_STATS_P4(tx_mult_coll_frames),
	GBENU_STATS_P4(tx_excessive_collisions),
	GBENU_STATS_P4(tx_late_collisions),
	GBENU_STATS_P4(rx_ipg_error),
	GBENU_STATS_P4(tx_carrier_sense_errors),
	GBENU_STATS_P4(tx_bytes),
	GBENU_STATS_P4(tx_64B_frames),
	GBENU_STATS_P4(tx_65_to_127B_frames),
	GBENU_STATS_P4(tx_128_to_255B_frames),
	GBENU_STATS_P4(tx_256_to_511B_frames),
	GBENU_STATS_P4(tx_512_to_1023B_frames),
	GBENU_STATS_P4(tx_1024B_frames),
	GBENU_STATS_P4(net_bytes),
	GBENU_STATS_P4(rx_bottom_fifo_drop),
	GBENU_STATS_P4(rx_port_mask_drop),
	GBENU_STATS_P4(rx_top_fifo_drop),
	GBENU_STATS_P4(ale_rate_limit_drop),
	GBENU_STATS_P4(ale_vid_ingress_drop),
	GBENU_STATS_P4(ale_da_eq_sa_drop),
	GBENU_STATS_P4(ale_unknown_ucast),
	GBENU_STATS_P4(ale_unknown_ucast_bytes),
	GBENU_STATS_P4(ale_unknown_mcast),
	GBENU_STATS_P4(ale_unknown_mcast_bytes),
	GBENU_STATS_P4(ale_unknown_bcast),
	GBENU_STATS_P4(ale_unknown_bcast_bytes),
	GBENU_STATS_P4(ale_pol_match),
	GBENU_STATS_P4(ale_pol_match_red),
	GBENU_STATS_P4(ale_pol_match_yellow),
	GBENU_STATS_P4(tx_mem_protect_err),
	GBENU_STATS_P4(tx_pri0_drop),
	GBENU_STATS_P4(tx_pri1_drop),
	GBENU_STATS_P4(tx_pri2_drop),
	GBENU_STATS_P4(tx_pri3_drop),
	GBENU_STATS_P4(tx_pri4_drop),
	GBENU_STATS_P4(tx_pri5_drop),
	GBENU_STATS_P4(tx_pri6_drop),
	GBENU_STATS_P4(tx_pri7_drop),
	GBENU_STATS_P4(tx_pri0_drop_bcnt),
	GBENU_STATS_P4(tx_pri1_drop_bcnt),
	GBENU_STATS_P4(tx_pri2_drop_bcnt),
	GBENU_STATS_P4(tx_pri3_drop_bcnt),
	GBENU_STATS_P4(tx_pri4_drop_bcnt),
	GBENU_STATS_P4(tx_pri5_drop_bcnt),
	GBENU_STATS_P4(tx_pri6_drop_bcnt),
	GBENU_STATS_P4(tx_pri7_drop_bcnt),
	/* GBENU Module 5 */
	GBENU_STATS_P5(rx_good_frames),
	GBENU_STATS_P5(rx_broadcast_frames),
	GBENU_STATS_P5(rx_multicast_frames),
	GBENU_STATS_P5(rx_pause_frames),
	GBENU_STATS_P5(rx_crc_errors),
	GBENU_STATS_P5(rx_align_code_errors),
	GBENU_STATS_P5(rx_oversized_frames),
	GBENU_STATS_P5(rx_jabber_frames),
	GBENU_STATS_P5(rx_undersized_frames),
	GBENU_STATS_P5(rx_fragments),
	GBENU_STATS_P5(ale_drop),
	GBENU_STATS_P5(ale_overrun_drop),
	GBENU_STATS_P5(rx_bytes),
	GBENU_STATS_P5(tx_good_frames),
	GBENU_STATS_P5(tx_broadcast_frames),
	GBENU_STATS_P5(tx_multicast_frames),
	GBENU_STATS_P5(tx_pause_frames),
	GBENU_STATS_P5(tx_deferred_frames),
	GBENU_STATS_P5(tx_collision_frames),
	GBENU_STATS_P5(tx_single_coll_frames),
	GBENU_STATS_P5(tx_mult_coll_frames),
	GBENU_STATS_P5(tx_excessive_collisions),
	GBENU_STATS_P5(tx_late_collisions),
	GBENU_STATS_P5(rx_ipg_error),
	GBENU_STATS_P5(tx_carrier_sense_errors),
	GBENU_STATS_P5(tx_bytes),
	GBENU_STATS_P5(tx_64B_frames),
	GBENU_STATS_P5(tx_65_to_127B_frames),
	GBENU_STATS_P5(tx_128_to_255B_frames),
	GBENU_STATS_P5(tx_256_to_511B_frames),
	GBENU_STATS_P5(tx_512_to_1023B_frames),
	GBENU_STATS_P5(tx_1024B_frames),
	GBENU_STATS_P5(net_bytes),
	GBENU_STATS_P5(rx_bottom_fifo_drop),
	GBENU_STATS_P5(rx_port_mask_drop),
	GBENU_STATS_P5(rx_top_fifo_drop),
	GBENU_STATS_P5(ale_rate_limit_drop),
	GBENU_STATS_P5(ale_vid_ingress_drop),
	GBENU_STATS_P5(ale_da_eq_sa_drop),
	GBENU_STATS_P5(ale_unknown_ucast),
	GBENU_STATS_P5(ale_unknown_ucast_bytes),
	GBENU_STATS_P5(ale_unknown_mcast),
	GBENU_STATS_P5(ale_unknown_mcast_bytes),
	GBENU_STATS_P5(ale_unknown_bcast),
	GBENU_STATS_P5(ale_unknown_bcast_bytes),
	GBENU_STATS_P5(ale_pol_match),
	GBENU_STATS_P5(ale_pol_match_red),
	GBENU_STATS_P5(ale_pol_match_yellow),
	GBENU_STATS_P5(tx_mem_protect_err),
	GBENU_STATS_P5(tx_pri0_drop),
	GBENU_STATS_P5(tx_pri1_drop),
	GBENU_STATS_P5(tx_pri2_drop),
	GBENU_STATS_P5(tx_pri3_drop),
	GBENU_STATS_P5(tx_pri4_drop),
	GBENU_STATS_P5(tx_pri5_drop),
	GBENU_STATS_P5(tx_pri6_drop),
	GBENU_STATS_P5(tx_pri7_drop),
	GBENU_STATS_P5(tx_pri0_drop_bcnt),
	GBENU_STATS_P5(tx_pri1_drop_bcnt),
	GBENU_STATS_P5(tx_pri2_drop_bcnt),
	GBENU_STATS_P5(tx_pri3_drop_bcnt),
	GBENU_STATS_P5(tx_pri4_drop_bcnt),
	GBENU_STATS_P5(tx_pri5_drop_bcnt),
	GBENU_STATS_P5(tx_pri6_drop_bcnt),
	GBENU_STATS_P5(tx_pri7_drop_bcnt),
	/* GBENU Module 6 */
	GBENU_STATS_P6(rx_good_frames),
	GBENU_STATS_P6(rx_broadcast_frames),
	GBENU_STATS_P6(rx_multicast_frames),
	GBENU_STATS_P6(rx_pause_frames),
	GBENU_STATS_P6(rx_crc_errors),
	GBENU_STATS_P6(rx_align_code_errors),
	GBENU_STATS_P6(rx_oversized_frames),
	GBENU_STATS_P6(rx_jabber_frames),
	GBENU_STATS_P6(rx_undersized_frames),
	GBENU_STATS_P6(rx_fragments),
	GBENU_STATS_P6(ale_drop),
	GBENU_STATS_P6(ale_overrun_drop),
	GBENU_STATS_P6(rx_bytes),
	GBENU_STATS_P6(tx_good_frames),
	GBENU_STATS_P6(tx_broadcast_frames),
	GBENU_STATS_P6(tx_multicast_frames),
	GBENU_STATS_P6(tx_pause_frames),
	GBENU_STATS_P6(tx_deferred_frames),
	GBENU_STATS_P6(tx_collision_frames),
	GBENU_STATS_P6(tx_single_coll_frames),
	GBENU_STATS_P6(tx_mult_coll_frames),
	GBENU_STATS_P6(tx_excessive_collisions),
	GBENU_STATS_P6(tx_late_collisions),
	GBENU_STATS_P6(rx_ipg_error),
	GBENU_STATS_P6(tx_carrier_sense_errors),
	GBENU_STATS_P6(tx_bytes),
	GBENU_STATS_P6(tx_64B_frames),
	GBENU_STATS_P6(tx_65_to_127B_frames),
	GBENU_STATS_P6(tx_128_to_255B_frames),
	GBENU_STATS_P6(tx_256_to_511B_frames),
	GBENU_STATS_P6(tx_512_to_1023B_frames),
	GBENU_STATS_P6(tx_1024B_frames),
	GBENU_STATS_P6(net_bytes),
	GBENU_STATS_P6(rx_bottom_fifo_drop),
	GBENU_STATS_P6(rx_port_mask_drop),
	GBENU_STATS_P6(rx_top_fifo_drop),
	GBENU_STATS_P6(ale_rate_limit_drop),
	GBENU_STATS_P6(ale_vid_ingress_drop),
	GBENU_STATS_P6(ale_da_eq_sa_drop),
	GBENU_STATS_P6(ale_unknown_ucast),
	GBENU_STATS_P6(ale_unknown_ucast_bytes),
	GBENU_STATS_P6(ale_unknown_mcast),
	GBENU_STATS_P6(ale_unknown_mcast_bytes),
	GBENU_STATS_P6(ale_unknown_bcast),
	GBENU_STATS_P6(ale_unknown_bcast_bytes),
	GBENU_STATS_P6(ale_pol_match),
	GBENU_STATS_P6(ale_pol_match_red),
	GBENU_STATS_P6(ale_pol_match_yellow),
	GBENU_STATS_P6(tx_mem_protect_err),
	GBENU_STATS_P6(tx_pri0_drop),
	GBENU_STATS_P6(tx_pri1_drop),
	GBENU_STATS_P6(tx_pri2_drop),
	GBENU_STATS_P6(tx_pri3_drop),
	GBENU_STATS_P6(tx_pri4_drop),
	GBENU_STATS_P6(tx_pri5_drop),
	GBENU_STATS_P6(tx_pri6_drop),
	GBENU_STATS_P6(tx_pri7_drop),
	GBENU_STATS_P6(tx_pri0_drop_bcnt),
	GBENU_STATS_P6(tx_pri1_drop_bcnt),
	GBENU_STATS_P6(tx_pri2_drop_bcnt),
	GBENU_STATS_P6(tx_pri3_drop_bcnt),
	GBENU_STATS_P6(tx_pri4_drop_bcnt),
	GBENU_STATS_P6(tx_pri5_drop_bcnt),
	GBENU_STATS_P6(tx_pri6_drop_bcnt),
	GBENU_STATS_P6(tx_pri7_drop_bcnt),
	/* GBENU Module 7 */
	GBENU_STATS_P7(rx_good_frames),
	GBENU_STATS_P7(rx_broadcast_frames),
	GBENU_STATS_P7(rx_multicast_frames),
	GBENU_STATS_P7(rx_pause_frames),
	GBENU_STATS_P7(rx_crc_errors),
	GBENU_STATS_P7(rx_align_code_errors),
	GBENU_STATS_P7(rx_oversized_frames),
	GBENU_STATS_P7(rx_jabber_frames),
	GBENU_STATS_P7(rx_undersized_frames),
	GBENU_STATS_P7(rx_fragments),
	GBENU_STATS_P7(ale_drop),
	GBENU_STATS_P7(ale_overrun_drop),
	GBENU_STATS_P7(rx_bytes),
	GBENU_STATS_P7(tx_good_frames),
	GBENU_STATS_P7(tx_broadcast_frames),
	GBENU_STATS_P7(tx_multicast_frames),
	GBENU_STATS_P7(tx_pause_frames),
	GBENU_STATS_P7(tx_deferred_frames),
	GBENU_STATS_P7(tx_collision_frames),
	GBENU_STATS_P7(tx_single_coll_frames),
	GBENU_STATS_P7(tx_mult_coll_frames),
	GBENU_STATS_P7(tx_excessive_collisions),
	GBENU_STATS_P7(tx_late_collisions),
	GBENU_STATS_P7(rx_ipg_error),
	GBENU_STATS_P7(tx_carrier_sense_errors),
	GBENU_STATS_P7(tx_bytes),
	GBENU_STATS_P7(tx_64B_frames),
	GBENU_STATS_P7(tx_65_to_127B_frames),
	GBENU_STATS_P7(tx_128_to_255B_frames),
	GBENU_STATS_P7(tx_256_to_511B_frames),
	GBENU_STATS_P7(tx_512_to_1023B_frames),
	GBENU_STATS_P7(tx_1024B_frames),
	GBENU_STATS_P7(net_bytes),
	GBENU_STATS_P7(rx_bottom_fifo_drop),
	GBENU_STATS_P7(rx_port_mask_drop),
	GBENU_STATS_P7(rx_top_fifo_drop),
	GBENU_STATS_P7(ale_rate_limit_drop),
	GBENU_STATS_P7(ale_vid_ingress_drop),
	GBENU_STATS_P7(ale_da_eq_sa_drop),
	GBENU_STATS_P7(ale_unknown_ucast),
	GBENU_STATS_P7(ale_unknown_ucast_bytes),
	GBENU_STATS_P7(ale_unknown_mcast),
	GBENU_STATS_P7(ale_unknown_mcast_bytes),
	GBENU_STATS_P7(ale_unknown_bcast),
	GBENU_STATS_P7(ale_unknown_bcast_bytes),
	GBENU_STATS_P7(ale_pol_match),
	GBENU_STATS_P7(ale_pol_match_red),
	GBENU_STATS_P7(ale_pol_match_yellow),
	GBENU_STATS_P7(tx_mem_protect_err),
	GBENU_STATS_P7(tx_pri0_drop),
	GBENU_STATS_P7(tx_pri1_drop),
	GBENU_STATS_P7(tx_pri2_drop),
	GBENU_STATS_P7(tx_pri3_drop),
	GBENU_STATS_P7(tx_pri4_drop),
	GBENU_STATS_P7(tx_pri5_drop),
	GBENU_STATS_P7(tx_pri6_drop),
	GBENU_STATS_P7(tx_pri7_drop),
	GBENU_STATS_P7(tx_pri0_drop_bcnt),
	GBENU_STATS_P7(tx_pri1_drop_bcnt),
	GBENU_STATS_P7(tx_pri2_drop_bcnt),
	GBENU_STATS_P7(tx_pri3_drop_bcnt),
	GBENU_STATS_P7(tx_pri4_drop_bcnt),
	GBENU_STATS_P7(tx_pri5_drop_bcnt),
	GBENU_STATS_P7(tx_pri6_drop_bcnt),
	GBENU_STATS_P7(tx_pri7_drop_bcnt),
	/* GBENU Module 8 */
	GBENU_STATS_P8(rx_good_frames),
	GBENU_STATS_P8(rx_broadcast_frames),
	GBENU_STATS_P8(rx_multicast_frames),
	GBENU_STATS_P8(rx_pause_frames),
	GBENU_STATS_P8(rx_crc_errors),
	GBENU_STATS_P8(rx_align_code_errors),
	GBENU_STATS_P8(rx_oversized_frames),
	GBENU_STATS_P8(rx_jabber_frames),
	GBENU_STATS_P8(rx_undersized_frames),
	GBENU_STATS_P8(rx_fragments),
	GBENU_STATS_P8(ale_drop),
	GBENU_STATS_P8(ale_overrun_drop),
	GBENU_STATS_P8(rx_bytes),
	GBENU_STATS_P8(tx_good_frames),
	GBENU_STATS_P8(tx_broadcast_frames),
	GBENU_STATS_P8(tx_multicast_frames),
	GBENU_STATS_P8(tx_pause_frames),
	GBENU_STATS_P8(tx_deferred_frames),
	GBENU_STATS_P8(tx_collision_frames),
	GBENU_STATS_P8(tx_single_coll_frames),
	GBENU_STATS_P8(tx_mult_coll_frames),
	GBENU_STATS_P8(tx_excessive_collisions),
	GBENU_STATS_P8(tx_late_collisions),
	GBENU_STATS_P8(rx_ipg_error),
	GBENU_STATS_P8(tx_carrier_sense_errors),
	GBENU_STATS_P8(tx_bytes),
	GBENU_STATS_P8(tx_64B_frames),
	GBENU_STATS_P8(tx_65_to_127B_frames),
	GBENU_STATS_P8(tx_128_to_255B_frames),
	GBENU_STATS_P8(tx_256_to_511B_frames),
	GBENU_STATS_P8(tx_512_to_1023B_frames),
	GBENU_STATS_P8(tx_1024B_frames),
	GBENU_STATS_P8(net_bytes),
	GBENU_STATS_P8(rx_bottom_fifo_drop),
	GBENU_STATS_P8(rx_port_mask_drop),
	GBENU_STATS_P8(rx_top_fifo_drop),
	GBENU_STATS_P8(ale_rate_limit_drop),
	GBENU_STATS_P8(ale_vid_ingress_drop),
	GBENU_STATS_P8(ale_da_eq_sa_drop),
	GBENU_STATS_P8(ale_unknown_ucast),
	GBENU_STATS_P8(ale_unknown_ucast_bytes),
	GBENU_STATS_P8(ale_unknown_mcast),
	GBENU_STATS_P8(ale_unknown_mcast_bytes),
	GBENU_STATS_P8(ale_unknown_bcast),
	GBENU_STATS_P8(ale_unknown_bcast_bytes),
	GBENU_STATS_P8(ale_pol_match),
	GBENU_STATS_P8(ale_pol_match_red),
	GBENU_STATS_P8(ale_pol_match_yellow),
	GBENU_STATS_P8(tx_mem_protect_err),
	GBENU_STATS_P8(tx_pri0_drop),
	GBENU_STATS_P8(tx_pri1_drop),
	GBENU_STATS_P8(tx_pri2_drop),
	GBENU_STATS_P8(tx_pri3_drop),
	GBENU_STATS_P8(tx_pri4_drop),
	GBENU_STATS_P8(tx_pri5_drop),
	GBENU_STATS_P8(tx_pri6_drop),
	GBENU_STATS_P8(tx_pri7_drop),
	GBENU_STATS_P8(tx_pri0_drop_bcnt),
	GBENU_STATS_P8(tx_pri1_drop_bcnt),
	GBENU_STATS_P8(tx_pri2_drop_bcnt),
	GBENU_STATS_P8(tx_pri3_drop_bcnt),
	GBENU_STATS_P8(tx_pri4_drop_bcnt),
	GBENU_STATS_P8(tx_pri5_drop_bcnt),
	GBENU_STATS_P8(tx_pri6_drop_bcnt),
	GBENU_STATS_P8(tx_pri7_drop_bcnt),
};
1614 
/* Build one netcp_ethtool_stat entry for an XGBE statistics-module-0
 * (switch/host) counter: ethtool display name, owning stats module, and
 * the counter's size and offset within struct xgbe_hw_stats.
 */
#define XGBE_STATS0_INFO(field)				\
{							\
	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

/* As above, for XGBE statistics module 1 (slave port 1). */
#define XGBE_STATS1_INFO(field)				\
{							\
	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}

/* As above, for XGBE statistics module 2 (slave port 2). */
#define XGBE_STATS2_INFO(field)				\
{							\
	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
	offsetof(struct xgbe_hw_stats, field)		\
}
1635 
/* ethtool statistics table for the 10G (XGBE) subsystem: one entry per
 * hardware counter, giving its display name, owning stats module and its
 * size/offset within struct xgbe_hw_stats.  Module 0 is the host port;
 * modules 1 and 2 are the two slave ports and expose the same layout.
 */
static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* GBE module 0 */
	XGBE_STATS0_INFO(rx_good_frames),
	XGBE_STATS0_INFO(rx_broadcast_frames),
	XGBE_STATS0_INFO(rx_multicast_frames),
	XGBE_STATS0_INFO(rx_oversized_frames),
	XGBE_STATS0_INFO(rx_undersized_frames),
	XGBE_STATS0_INFO(overrun_type4),
	XGBE_STATS0_INFO(overrun_type5),
	XGBE_STATS0_INFO(rx_bytes),
	XGBE_STATS0_INFO(tx_good_frames),
	XGBE_STATS0_INFO(tx_broadcast_frames),
	XGBE_STATS0_INFO(tx_multicast_frames),
	XGBE_STATS0_INFO(tx_bytes),
	XGBE_STATS0_INFO(tx_64byte_frames),
	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS0_INFO(tx_1024byte_frames),
	XGBE_STATS0_INFO(net_bytes),
	XGBE_STATS0_INFO(rx_sof_overruns),
	XGBE_STATS0_INFO(rx_mof_overruns),
	XGBE_STATS0_INFO(rx_dma_overruns),
	/* XGBE module 1 */
	XGBE_STATS1_INFO(rx_good_frames),
	XGBE_STATS1_INFO(rx_broadcast_frames),
	XGBE_STATS1_INFO(rx_multicast_frames),
	XGBE_STATS1_INFO(rx_pause_frames),
	XGBE_STATS1_INFO(rx_crc_errors),
	XGBE_STATS1_INFO(rx_align_code_errors),
	XGBE_STATS1_INFO(rx_oversized_frames),
	XGBE_STATS1_INFO(rx_jabber_frames),
	XGBE_STATS1_INFO(rx_undersized_frames),
	XGBE_STATS1_INFO(rx_fragments),
	XGBE_STATS1_INFO(overrun_type4),
	XGBE_STATS1_INFO(overrun_type5),
	XGBE_STATS1_INFO(rx_bytes),
	XGBE_STATS1_INFO(tx_good_frames),
	XGBE_STATS1_INFO(tx_broadcast_frames),
	XGBE_STATS1_INFO(tx_multicast_frames),
	XGBE_STATS1_INFO(tx_pause_frames),
	XGBE_STATS1_INFO(tx_deferred_frames),
	XGBE_STATS1_INFO(tx_collision_frames),
	XGBE_STATS1_INFO(tx_single_coll_frames),
	XGBE_STATS1_INFO(tx_mult_coll_frames),
	XGBE_STATS1_INFO(tx_excessive_collisions),
	XGBE_STATS1_INFO(tx_late_collisions),
	XGBE_STATS1_INFO(tx_underrun),
	XGBE_STATS1_INFO(tx_carrier_sense_errors),
	XGBE_STATS1_INFO(tx_bytes),
	XGBE_STATS1_INFO(tx_64byte_frames),
	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS1_INFO(tx_1024byte_frames),
	XGBE_STATS1_INFO(net_bytes),
	XGBE_STATS1_INFO(rx_sof_overruns),
	XGBE_STATS1_INFO(rx_mof_overruns),
	XGBE_STATS1_INFO(rx_dma_overruns),
	/* XGBE module 2 */
	XGBE_STATS2_INFO(rx_good_frames),
	XGBE_STATS2_INFO(rx_broadcast_frames),
	XGBE_STATS2_INFO(rx_multicast_frames),
	XGBE_STATS2_INFO(rx_pause_frames),
	XGBE_STATS2_INFO(rx_crc_errors),
	XGBE_STATS2_INFO(rx_align_code_errors),
	XGBE_STATS2_INFO(rx_oversized_frames),
	XGBE_STATS2_INFO(rx_jabber_frames),
	XGBE_STATS2_INFO(rx_undersized_frames),
	XGBE_STATS2_INFO(rx_fragments),
	XGBE_STATS2_INFO(overrun_type4),
	XGBE_STATS2_INFO(overrun_type5),
	XGBE_STATS2_INFO(rx_bytes),
	XGBE_STATS2_INFO(tx_good_frames),
	XGBE_STATS2_INFO(tx_broadcast_frames),
	XGBE_STATS2_INFO(tx_multicast_frames),
	XGBE_STATS2_INFO(tx_pause_frames),
	XGBE_STATS2_INFO(tx_deferred_frames),
	XGBE_STATS2_INFO(tx_collision_frames),
	XGBE_STATS2_INFO(tx_single_coll_frames),
	XGBE_STATS2_INFO(tx_mult_coll_frames),
	XGBE_STATS2_INFO(tx_excessive_collisions),
	XGBE_STATS2_INFO(tx_late_collisions),
	XGBE_STATS2_INFO(tx_underrun),
	XGBE_STATS2_INFO(tx_carrier_sense_errors),
	XGBE_STATS2_INFO(tx_bytes),
	XGBE_STATS2_INFO(tx_64byte_frames),
	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
	XGBE_STATS2_INFO(tx_1024byte_frames),
	XGBE_STATS2_INFO(net_bytes),
	XGBE_STATS2_INFO(rx_sof_overruns),
	XGBE_STATS2_INFO(rx_mof_overruns),
	XGBE_STATS2_INFO(rx_dma_overruns),
};
1735 
/* Iterate over every GBE interface registered on this device instance. */
#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

/* Iterate over the secondary slave ports (slaves without their own
 * network interface).
 */
#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

/* First entry on the secondary-slaves list; caller must ensure the list
 * is non-empty.
 */
#define first_sec_slave(priv)					\
	list_first_entry(&priv->secondary_slaves, \
			struct gbe_slave, slave_list)
1745 
1746 static void keystone_get_drvinfo(struct net_device *ndev,
1747 				 struct ethtool_drvinfo *info)
1748 {
1749 	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1750 	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1751 }
1752 
1753 static u32 keystone_get_msglevel(struct net_device *ndev)
1754 {
1755 	struct netcp_intf *netcp = netdev_priv(ndev);
1756 
1757 	return netcp->msg_enable;
1758 }
1759 
1760 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1761 {
1762 	struct netcp_intf *netcp = netdev_priv(ndev);
1763 
1764 	netcp->msg_enable = value;
1765 }
1766 
1767 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1768 {
1769 	struct gbe_intf *gbe_intf;
1770 
1771 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1772 	if (!gbe_intf)
1773 		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1774 
1775 	return gbe_intf;
1776 }
1777 
1778 static void keystone_get_stat_strings(struct net_device *ndev,
1779 				      uint32_t stringset, uint8_t *data)
1780 {
1781 	struct netcp_intf *netcp = netdev_priv(ndev);
1782 	struct gbe_intf *gbe_intf;
1783 	struct gbe_priv *gbe_dev;
1784 	int i;
1785 
1786 	gbe_intf = keystone_get_intf_data(netcp);
1787 	if (!gbe_intf)
1788 		return;
1789 	gbe_dev = gbe_intf->gbe_dev;
1790 
1791 	switch (stringset) {
1792 	case ETH_SS_STATS:
1793 		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1794 			memcpy(data, gbe_dev->et_stats[i].desc,
1795 			       ETH_GSTRING_LEN);
1796 			data += ETH_GSTRING_LEN;
1797 		}
1798 		break;
1799 	case ETH_SS_TEST:
1800 		break;
1801 	}
1802 }
1803 
1804 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1805 {
1806 	struct netcp_intf *netcp = netdev_priv(ndev);
1807 	struct gbe_intf *gbe_intf;
1808 	struct gbe_priv *gbe_dev;
1809 
1810 	gbe_intf = keystone_get_intf_data(netcp);
1811 	if (!gbe_intf)
1812 		return -EINVAL;
1813 	gbe_dev = gbe_intf->gbe_dev;
1814 
1815 	switch (stringset) {
1816 	case ETH_SS_TEST:
1817 		return 0;
1818 	case ETH_SS_STATS:
1819 		return gbe_dev->num_et_stats;
1820 	default:
1821 		return -EINVAL;
1822 	}
1823 }
1824 
1825 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1826 {
1827 	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1828 	u32  __iomem *p_stats_entry;
1829 	int i;
1830 
1831 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1832 		if (gbe_dev->et_stats[i].type == stats_mod) {
1833 			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1834 			gbe_dev->hw_stats[i] = 0;
1835 			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1836 		}
1837 	}
1838 }
1839 
1840 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1841 					     int et_stats_entry)
1842 {
1843 	void __iomem *base = NULL;
1844 	u32  __iomem *p_stats_entry;
1845 	u32 curr, delta;
1846 
1847 	/* The hw_stats_regs pointers are already
1848 	 * properly set to point to the right base:
1849 	 */
1850 	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1851 	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1852 	curr = readl(p_stats_entry);
1853 	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1854 	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1855 	gbe_dev->hw_stats[et_stats_entry] += delta;
1856 }
1857 
1858 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1859 {
1860 	int i;
1861 
1862 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1863 		gbe_update_hw_stats_entry(gbe_dev, i);
1864 
1865 		if (data)
1866 			data[i] = gbe_dev->hw_stats[i];
1867 	}
1868 }
1869 
1870 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1871 					       int stats_mod)
1872 {
1873 	u32 val;
1874 
1875 	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1876 
1877 	switch (stats_mod) {
1878 	case GBE_STATSA_MODULE:
1879 	case GBE_STATSB_MODULE:
1880 		val &= ~GBE_STATS_CD_SEL;
1881 		break;
1882 	case GBE_STATSC_MODULE:
1883 	case GBE_STATSD_MODULE:
1884 		val |= GBE_STATS_CD_SEL;
1885 		break;
1886 	default:
1887 		return;
1888 	}
1889 
1890 	/* make the stat module visible */
1891 	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1892 }
1893 
/* Version 1.4 variant of gbe_reset_mod_stats(): the target statistics
 * module must first be switched into the shared register window before
 * its counters can be read and reset.
 */
static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
{
	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
	gbe_reset_mod_stats(gbe_dev, stats_mod);
}
1899 
1900 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1901 {
1902 	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1903 	int et_entry, j, pair;
1904 
1905 	for (pair = 0; pair < 2; pair++) {
1906 		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1907 						      GBE_STATSC_MODULE :
1908 						      GBE_STATSA_MODULE));
1909 
1910 		for (j = 0; j < half_num_et_stats; j++) {
1911 			et_entry = pair * half_num_et_stats + j;
1912 			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1913 
1914 			if (data)
1915 				data[et_entry] = gbe_dev->hw_stats[et_entry];
1916 		}
1917 	}
1918 }
1919 
/* ethtool .get_ethtool_stats: snapshot all hardware counters into @data.
 * Version 1.4 hardware needs the banked reader; the BH-safe spinlock
 * serializes against the periodic stats timer (netcp_ethss_timer).
 */
static void keystone_get_ethtool_stats(struct net_device *ndev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;
	struct gbe_priv *gbe_dev;

	gbe_intf = keystone_get_intf_data(netcp);
	if (!gbe_intf)
		return;

	gbe_dev = gbe_intf->gbe_dev;
	spin_lock_bh(&gbe_dev->hw_stats_lock);
	if (IS_SS_ID_VER_14(gbe_dev))
		gbe_update_stats_ver14(gbe_dev, data);
	else
		gbe_update_stats(gbe_dev, data);
	spin_unlock_bh(&gbe_dev->hw_stats_lock);
}
1940 
1941 static int keystone_get_link_ksettings(struct net_device *ndev,
1942 				       struct ethtool_link_ksettings *cmd)
1943 {
1944 	struct netcp_intf *netcp = netdev_priv(ndev);
1945 	struct phy_device *phy = ndev->phydev;
1946 	struct gbe_intf *gbe_intf;
1947 
1948 	if (!phy)
1949 		return -EINVAL;
1950 
1951 	gbe_intf = keystone_get_intf_data(netcp);
1952 	if (!gbe_intf)
1953 		return -EINVAL;
1954 
1955 	if (!gbe_intf->slave)
1956 		return -EINVAL;
1957 
1958 	phy_ethtool_ksettings_get(phy, cmd);
1959 	cmd->base.port = gbe_intf->slave->phy_port_t;
1960 
1961 	return 0;
1962 }
1963 
1964 static int keystone_set_link_ksettings(struct net_device *ndev,
1965 				       const struct ethtool_link_ksettings *cmd)
1966 {
1967 	struct netcp_intf *netcp = netdev_priv(ndev);
1968 	struct phy_device *phy = ndev->phydev;
1969 	struct gbe_intf *gbe_intf;
1970 	u8 port = cmd->base.port;
1971 	u32 advertising, supported;
1972 	u32 features;
1973 
1974 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1975 						cmd->link_modes.advertising);
1976 	ethtool_convert_link_mode_to_legacy_u32(&supported,
1977 						cmd->link_modes.supported);
1978 	features = advertising & supported;
1979 
1980 	if (!phy)
1981 		return -EINVAL;
1982 
1983 	gbe_intf = keystone_get_intf_data(netcp);
1984 	if (!gbe_intf)
1985 		return -EINVAL;
1986 
1987 	if (!gbe_intf->slave)
1988 		return -EINVAL;
1989 
1990 	if (port != gbe_intf->slave->phy_port_t) {
1991 		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1992 			return -EINVAL;
1993 
1994 		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1995 			return -EINVAL;
1996 
1997 		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1998 			return -EINVAL;
1999 
2000 		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
2001 			return -EINVAL;
2002 
2003 		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
2004 			return -EINVAL;
2005 	}
2006 
2007 	gbe_intf->slave->phy_port_t = port;
2008 	return phy_ethtool_ksettings_set(phy, cmd);
2009 }
2010 
#if IS_ENABLED(CONFIG_TI_CPTS)
/* ethtool .get_ts_info (CPTS built in): advertise hardware and software
 * timestamping backed by the shared CPTS PTP clock.  -EINVAL when no
 * interface or CPTS instance is attached.
 */
static int keystone_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_intf *gbe_intf;

	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
		return -EINVAL;

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	/* only the filters gbe_hwtstamp_set() can actually program */
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
/* ethtool .get_ts_info (CPTS disabled): software timestamping only,
 * no PTP hardware clock.
 */
static int keystone_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif /* CONFIG_TI_CPTS */
2053 
/* ethtool operations shared by all keystone GBE/XGBE interfaces */
static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo		= keystone_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= keystone_get_msglevel,
	.set_msglevel		= keystone_set_msglevel,
	.get_strings		= keystone_get_stat_strings,
	.get_sset_count		= keystone_get_sset_count,
	.get_ethtool_stats	= keystone_get_ethtool_stats,
	.get_link_ksettings	= keystone_get_link_ksettings,
	.set_link_ksettings	= keystone_set_link_ksettings,
	.get_ts_info		= keystone_get_ts_info,
};
2066 
/* Program the slave port's source-address registers from the interface's
 * netdev MAC address.
 */
static void gbe_set_slave_mac(struct gbe_slave *slave,
			      struct gbe_intf *gbe_intf)
{
	struct net_device *ndev = gbe_intf->ndev;

	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
}
2075 
2076 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2077 {
2078 	if (priv->host_port == 0)
2079 		return slave_num + 1;
2080 
2081 	return slave_num;
2082 }
2083 
/* Apply a link up/down transition to a slave port: program the MAC
 * speed-mode bits in mac_control, set the ALE port forwarding state,
 * and - for link interfaces where phylib does not drive the carrier -
 * toggle the netdev carrier directly.
 */
static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		/* select GIG vs XGIG mode based on the negotiated speed */
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		/* for *_MAC_PHY interfaces the carrier follows the PHY
		 * state machine instead of being set here
		 */
		if (ndev && slave->open &&
		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
			netif_carrier_on(ndev);
	} else {
		/* mac_control is still 0 here: disable the MAC sublayer */
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev &&
		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}
2130 
2131 static bool gbe_phy_link_status(struct gbe_slave *slave)
2132 {
2133 	 return !slave->phy || slave->phy->link;
2134 }
2135 
2136 #define RGMII_REG_STATUS_LINK	BIT(0)
2137 
2138 static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2139 {
2140 	u32 val = 0;
2141 
2142 	val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2143 	*status = !!(val & RGMII_REG_STATUS_LINK);
2144 }
2145 
/* Combine the hardware (SGMII/RGMII) and PHY link indications for a
 * slave and, when the combined state differs from the cached one, run
 * the link state action.  No-op for closed slaves.
 */
static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	bool sw_link_state = true, phy_link_state;
	int sp = slave->slave_num, link_state;

	if (!slave->open)
		return;

	if (SLAVE_LINK_IS_RGMII(slave))
		netcp_2u_rgmii_get_port_link(gbe_dev,
					     &sw_link_state);
	if (SLAVE_LINK_IS_SGMII(slave))
		sw_link_state =
		netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);

	phy_link_state = gbe_phy_link_status(slave);
	/* link is up only when both serdes and PHY agree */
	link_state = phy_link_state & sw_link_state;

	/* atomic_xchg ensures only the observer of a change acts on it */
	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}
2170 
2171 static void xgbe_adjust_link(struct net_device *ndev)
2172 {
2173 	struct netcp_intf *netcp = netdev_priv(ndev);
2174 	struct gbe_intf *gbe_intf;
2175 
2176 	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2177 	if (!gbe_intf)
2178 		return;
2179 
2180 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2181 				      ndev);
2182 }
2183 
2184 static void gbe_adjust_link(struct net_device *ndev)
2185 {
2186 	struct netcp_intf *netcp = netdev_priv(ndev);
2187 	struct gbe_intf *gbe_intf;
2188 
2189 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2190 	if (!gbe_intf)
2191 		return;
2192 
2193 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2194 				      ndev);
2195 }
2196 
/* phylib adjust_link callback for secondary slave ports: these have no
 * netdev of their own, so update link state with a NULL ndev.
 */
static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = netdev_priv(ndev);
	struct gbe_slave *slave;

	for_each_sec_slave(slave, gbe_dev)
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}
2205 
/* Reset EMAC
 * Soft reset is set and polled until clear, or until a timeout occurs
 */
static int gbe_port_reset(struct gbe_slave *slave)
{
	u32 i, v;

	/* Set the soft reset bit */
	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));

	/* Wait for the bit to clear.  NOTE(review): busy poll with no
	 * delay between reads; the wait is bounded only by
	 * DEVICE_EMACSL_RESET_POLL_COUNT iterations.
	 */
	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
			return 0;
	}

	/* Timeout on the reset */
	return GMACSL_RET_WARN_RESET_INCOMPLETE;
}
2226 
/* Configure EMAC: enable XGMII mode at the subsystem level where the
 * link interface requires it, program the RX maximum frame length
 * (capped at NETCP_MAX_FRAME_SIZE) and restore the cached mac_control.
 */
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
			    int max_rx_len)
{
	void __iomem *rx_maxlen_reg;
	u32 xgmii_mode;

	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
		max_rx_len = NETCP_MAX_FRAME_SIZE;

	/* Enable correct MII mode at SS level */
	if (IS_SS_ID_XGBE(gbe_dev) &&
	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
		xgmii_mode |= (1 << slave->slave_num);
		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
	}

	/* NU-family subsystems keep rx_maxlen in the port register block,
	 * older ones in the emac register block
	 */
	if (IS_SS_ID_MU(gbe_dev))
		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
	else
		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);

	writel(max_rx_len, rx_maxlen_reg);
	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
2253 
2254 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2255 			      struct gbe_slave *slave, bool set)
2256 {
2257 	if (SLAVE_LINK_IS_XGMII(slave))
2258 		return;
2259 
2260 	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2261 			    slave->slave_num, set);
2262 }
2263 
/* Quiesce a slave port: hold SGMII in reset (except on 2U subsystems),
 * soft-reset the MAC, disable ALE forwarding, drop the broadcast mcast
 * entry and detach the PHY if one was connected.
 */
static void gbe_slave_stop(struct gbe_intf *intf)
{
	struct gbe_priv *gbe_dev = intf->gbe_dev;
	struct gbe_slave *slave = intf->slave;

	if (!IS_SS_ID_2U(gbe_dev))
		gbe_sgmii_rtreset(gbe_dev, slave, true);
	gbe_port_reset(slave);
	/* Disable forwarding */
	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0);

	if (!slave->phy)
		return;

	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}
2285 
2286 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2287 {
2288 	if (SLAVE_LINK_IS_XGMII(slave))
2289 		return;
2290 
2291 	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2292 	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2293 			   slave->link_interface);
2294 }
2295 
/* Bring up a slave port: configure SGMII (non-2U), reset and configure
 * the MAC, program the station address, enable ALE forwarding and - for
 * MAC-PHY link interfaces - connect and start the PHY with the right
 * adjust_link handler.  Returns 0 on success or a negative errno.
 */
static int gbe_slave_open(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *priv = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	phy_interface_t phy_mode;
	bool has_phy = false;

	void (*hndlr)(struct net_device *) = gbe_adjust_link;

	if (!IS_SS_ID_2U(priv))
		gbe_sgmii_config(priv, slave);
	gbe_port_reset(slave);
	if (!IS_SS_ID_2U(priv))
		gbe_sgmii_rtreset(priv, slave, false);
	gbe_port_config(priv, slave, priv->rx_packet_max);
	gbe_set_slave_mac(slave, gbe_intf);
	/* For NU & 2U switch, map the vlan priorities to zero
	 * as we only configure to use priority 0
	 */
	if (IS_SS_ID_MU(priv))
		writel(HOST_TX_PRI_MAP_DEFAULT,
		       GBE_REG_ADDR(slave, port_regs, rx_pri_map));

	/* enable forwarding */
	cpsw_ale_control_set(priv->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);

	/* pick PHY mode and ethtool connector type per link interface */
	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = of_get_phy_mode(slave->node);
		/* if phy-mode is not present, default to
		 * PHY_INTERFACE_MODE_RGMII
		 */
		if (phy_mode < 0)
			phy_mode = PHY_INTERFACE_MODE_RGMII;

		if (!phy_interface_mode_is_rgmii(phy_mode)) {
			dev_err(priv->dev,
				"Unsupported phy mode %d\n", phy_mode);
			return -EINVAL;
		}
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	if (has_phy) {
		if (IS_SS_ID_XGBE(priv))
			hndlr = xgbe_adjust_link;

		slave->phy = of_phy_connect(gbe_intf->ndev,
					    slave->phy_node,
					    hndlr, 0,
					    phy_mode);
		if (!slave->phy) {
			dev_err(priv->dev, "phy not found on slave %d\n",
				slave->slave_num);
			return -ENODEV;
		}
		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
			phydev_name(slave->phy));
		phy_start(slave->phy);
	}
	return 0;
}
2369 
/* One-time host port and ALE bring-up: priority maps, RX max length,
 * ALE start, and global ALE controls (bypass unless ALE is enabled,
 * plus flood / untagged-egress port masks).
 */
static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;

	/* Host Tx Pri */
	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
		writel(HOST_TX_PRI_MAP_DEFAULT,
		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));

	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	/* address lookup is bypassed unless the ALE is explicitly enabled */
	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	/* NOTE(review): mask covers one fewer port than the full set -
	 * presumably to exclude the host port from unknown-mcast
	 * flooding; verify against GBE_PORT_MASK()
	 */
	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}
2411 
2412 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2413 {
2414 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2415 	u16 vlan_id;
2416 
2417 	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2418 			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2419 			   ALE_MCAST_FWD_2);
2420 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2421 		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2422 				   GBE_PORT_MASK(gbe_dev->ale_ports),
2423 				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2424 	}
2425 }
2426 
2427 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2428 {
2429 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2430 	u16 vlan_id;
2431 
2432 	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2433 
2434 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2435 		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2436 				   ALE_VLAN, vlan_id);
2437 }
2438 
2439 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2440 {
2441 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2442 	u16 vlan_id;
2443 
2444 	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2445 
2446 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2447 		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2448 	}
2449 }
2450 
2451 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2452 {
2453 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2454 	u16 vlan_id;
2455 
2456 	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2457 
2458 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2459 		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2460 				   ALE_VLAN, vlan_id);
2461 	}
2462 }
2463 
2464 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2465 {
2466 	struct gbe_intf *gbe_intf = intf_priv;
2467 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2468 
2469 	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2470 		naddr->addr, naddr->type);
2471 
2472 	switch (naddr->type) {
2473 	case ADDR_MCAST:
2474 	case ADDR_BCAST:
2475 		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2476 		break;
2477 	case ADDR_UCAST:
2478 	case ADDR_DEV:
2479 		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2480 		break;
2481 	case ADDR_ANY:
2482 		/* nothing to do for promiscuous */
2483 	default:
2484 		break;
2485 	}
2486 
2487 	return 0;
2488 }
2489 
2490 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2491 {
2492 	struct gbe_intf *gbe_intf = intf_priv;
2493 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2494 
2495 	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2496 		naddr->addr, naddr->type);
2497 
2498 	switch (naddr->type) {
2499 	case ADDR_MCAST:
2500 	case ADDR_BCAST:
2501 		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2502 		break;
2503 	case ADDR_UCAST:
2504 	case ADDR_DEV:
2505 		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2506 		break;
2507 	case ADDR_ANY:
2508 		/* nothing to do for promiscuous */
2509 	default:
2510 		break;
2511 	}
2512 
2513 	return 0;
2514 }
2515 
2516 static int gbe_add_vid(void *intf_priv, int vid)
2517 {
2518 	struct gbe_intf *gbe_intf = intf_priv;
2519 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2520 
2521 	set_bit(vid, gbe_intf->active_vlans);
2522 
2523 	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2524 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2525 			  GBE_MASK_NO_PORTS,
2526 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2527 			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2528 
2529 	return 0;
2530 }
2531 
2532 static int gbe_del_vid(void *intf_priv, int vid)
2533 {
2534 	struct gbe_intf *gbe_intf = intf_priv;
2535 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2536 
2537 	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2538 	clear_bit(vid, gbe_intf->active_vlans);
2539 	return 0;
2540 }
2541 
#if IS_ENABLED(CONFIG_TI_CPTS)
/* true when the attached PHY driver provides its own timestamping */
#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)

/* netcp TX-completion callback: hand the transmitted skb to CPTS for
 * hardware timestamp matching.  @context is the gbe_intf set up in
 * gbe_txtstamp_mark_pkt().
 */
static void gbe_txtstamp(void *context, struct sk_buff *skb)
{
	struct gbe_intf *gbe_intf = context;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	cpts_tx_timestamp(gbe_dev->cpts, skb);
}
2553 
2554 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2555 			      const struct netcp_packet *p_info)
2556 {
2557 	struct sk_buff *skb = p_info->skb;
2558 
2559 	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2560 }
2561 
/* TX hook helper: decide who timestamps this packet.  If the PHY driver
 * can do it, only mark the skb in-progress; otherwise arm the netcp
 * completion callback so CPTS picks up the timestamp.
 * Always returns 0 (the hook never rejects packets).
 */
static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
				 struct netcp_packet *p_info)
{
	struct phy_device *phydev = p_info->skb->dev->phydev;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	/* nothing to do unless the skb requested a HW timestamp and TX
	 * timestamping is enabled via SIOCSHWTSTAMP
	 */
	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
	    !gbe_dev->tx_ts_enabled)
		return 0;

	/* If phy has the txtstamp api, assume it will do it.
	 * We mark it here because skb_tx_timestamp() is called
	 * after all the txhooks are called.
	 */
	if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
		return 0;
	}

	if (gbe_need_txtstamp(gbe_intf, p_info)) {
		p_info->txtstamp = gbe_txtstamp;
		p_info->ts_context = (void *)gbe_intf;
		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
	}

	return 0;
}
2589 
/* RX hook helper: attach a CPTS hardware timestamp to the skb, unless
 * the PHY does RX timestamping itself or the packet was already
 * handled.  Always returns 0.
 */
static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
{
	struct phy_device *phydev = p_info->skb->dev->phydev;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	if (p_info->rxtstamp_complete)
		return 0;

	/* defer to the PHY driver's rxtstamp implementation if present */
	if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
		p_info->rxtstamp_complete = true;
		return 0;
	}

	if (gbe_dev->rx_ts_enabled)
		cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);

	/* mark done either way so later hooks don't retry */
	p_info->rxtstamp_complete = true;

	return 0;
}
2610 
/* SIOCGHWTSTAMP: report the current hardware timestamping configuration.
 * rx_ts_enabled holds the HWTSTAMP_FILTER_* value negotiated in
 * gbe_hwtstamp_set(), so it can be returned as-is.
 * Returns 0, -EOPNOTSUPP without CPTS, or -EFAULT on a bad user buffer.
 */
static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct cpts *cpts = gbe_dev->cpts;
	struct hwtstamp_config cfg;

	if (!cpts)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = gbe_dev->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
2626 
/* Program the slave's per-port timestamping registers from the current
 * tx/rx enable state: when both directions are off, clear ts_ctl;
 * otherwise enable matching of PTP (ETH_P_1588) event messages per the
 * slave's ts_ctl settings.
 */
static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	u32 ts_en, seq_id, ctl;

	if (!gbe_dev->rx_ts_enabled &&
	    !gbe_dev->tx_ts_enabled) {
		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
		return;
	}

	/* sequence-id offset 30 within the PTP header, ltype ETH_P_1588 */
	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
	ctl = ETH_P_1588 | TS_TTL_NONZERO |
		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
		(slave->ts_ctl.uni ?  TS_UNI_EN :
			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);

	if (gbe_dev->tx_ts_enabled)
		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);

	if (gbe_dev->rx_ts_enabled)
		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);

	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
}
2656 
/* SIOCSHWTSTAMP: validate and apply a hwtstamp_config.  Requested V1/V2
 * L4/L2 filters collapse to the closest supported event filter, which is
 * echoed back to user space in cfg.rx_filter (per the hwtstamp ABI).
 * Returns 0, -EOPNOTSUPP without CPTS, -EFAULT on a bad user buffer,
 * -EINVAL for reserved flags or -ERANGE for unsupported modes.
 */
static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct cpts *cpts = gbe_dev->cpts;
	struct hwtstamp_config cfg;

	if (!cpts)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		gbe_dev->tx_ts_enabled = 0;
		break;
	case HWTSTAMP_TX_ON:
		gbe_dev->tx_ts_enabled = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* push the new configuration to the port registers */
	gbe_hwtstamp(gbe_intf);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
2714 
2715 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2716 {
2717 	if (!gbe_dev->cpts)
2718 		return;
2719 
2720 	if (gbe_dev->cpts_registered > 0)
2721 		goto done;
2722 
2723 	if (cpts_register(gbe_dev->cpts)) {
2724 		dev_err(gbe_dev->dev, "error registering cpts device\n");
2725 		return;
2726 	}
2727 
2728 done:
2729 	++gbe_dev->cpts_registered;
2730 }
2731 
2732 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2733 {
2734 	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2735 		return;
2736 
2737 	if (--gbe_dev->cpts_registered)
2738 		return;
2739 
2740 	cpts_unregister(gbe_dev->cpts);
2741 }
#else
/* CPTS disabled: timestamping hooks become no-ops and the hwtstamp
 * ioctls report -EOPNOTSUPP.
 */
static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
					struct netcp_packet *p_info)
{
	return 0;
}

static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
			       struct netcp_packet *p_info)
{
	return 0;
}

/* NOTE(review): this 3-argument stub appears unused - gbe_ioctl() only
 * calls gbe_hwtstamp_get()/gbe_hwtstamp_set(); confirm and consider
 * removing.
 */
static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
			       struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
{
}

static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
{
}

static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
{
	return -EOPNOTSUPP;
}

static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_CPTS */
2779 
/* Enable or disable promiscuous mode on the switch.
 * Non-promisc: re-enable learning on all ports and stop flooding
 * unicast to the host.  Promisc: disable learning/SA-update, age out
 * stale ALE entries (polled for up to 1s), flush multicast entries and
 * flood all unicast to the host port.
 * Returns 0 on success or -ETIMEDOUT if the age-out never completed.
 */
static int gbe_set_rx_mode(void *intf_priv, bool promisc)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct cpsw_ale *ale = gbe_dev->ale;
	unsigned long timeout;
	int i, ret = -ETIMEDOUT;

	/* Disable(1)/Enable(0) Learn for all ports (host is port 0 and
	 * slaves are port 1 and up
	 */
	for (i = 0; i <= gbe_dev->num_slaves; i++) {
		cpsw_ale_control_set(ale, i,
				     ALE_PORT_NOLEARN, !!promisc);
		cpsw_ale_control_set(ale, i,
				     ALE_PORT_NO_SA_UPDATE, !!promisc);
	}

	if (!promisc) {
		/* Don't Flood All Unicast Packets to Host port */
		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
		dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
		return 0;
	}

	timeout = jiffies + HZ;

	/* Clear All Untouched entries */
	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
	do {
		cpu_relax();
		/* hardware clears AGEOUT... no: control reads back 1 when
		 * done per this poll - NOTE(review): verify ALE_AGEOUT
		 * read-back semantics against cpsw_ale
		 */
		if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
			ret = 0;
			break;
		}

	} while (time_after(timeout, jiffies));

	/* Make sure it is not a false timeout */
	if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
		return ret;

	/* NOTE(review): second age-out pass - presumably to clear entries
	 * touched during the poll window; confirm intent
	 */
	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(ale,
				 GBE_PORT_MASK(gbe_dev->ale_ports),
				 -1);

	/* Flood All Unicast Packets to Host port */
	cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
	dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
	return ret;
}
2834 
2835 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2836 {
2837 	struct gbe_intf *gbe_intf = intf_priv;
2838 	struct phy_device *phy = gbe_intf->slave->phy;
2839 
2840 	if (!phy || !phy->drv->hwtstamp) {
2841 		switch (cmd) {
2842 		case SIOCGHWTSTAMP:
2843 			return gbe_hwtstamp_get(gbe_intf, req);
2844 		case SIOCSHWTSTAMP:
2845 			return gbe_hwtstamp_set(gbe_intf, req);
2846 		}
2847 	}
2848 
2849 	if (phy)
2850 		return phy_mii_ioctl(phy, req, cmd);
2851 
2852 	return -EOPNOTSUPP;
2853 }
2854 
/* Periodic housekeeping (rearms itself every GBE_TIMER_INTERVAL):
 * refresh link state for all open interfaces and secondary slaves, and
 * fold the 32-bit hardware stats into the 64-bit software counters.
 */
static void netcp_ethss_timer(struct timer_list *t)
{
	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
	struct gbe_intf *gbe_intf;
	struct gbe_slave *slave;

	/* Check & update SGMII link state of interfaces */
	for_each_intf(gbe_intf, gbe_dev) {
		if (!gbe_intf->slave->open)
			continue;
		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
					      gbe_intf->ndev);
	}

	/* Check & update SGMII link state of secondary ports */
	for_each_sec_slave(slave, gbe_dev) {
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
	}

	/* A timer runs as a BH, no need to block them */
	spin_lock(&gbe_dev->hw_stats_lock);

	if (IS_SS_ID_VER_14(gbe_dev))
		gbe_update_stats_ver14(gbe_dev, NULL);
	else
		gbe_update_stats(gbe_dev, NULL);

	spin_unlock(&gbe_dev->hw_stats_lock);

	/* rearm for the next interval */
	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
}
2887 
2888 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2889 {
2890 	struct gbe_intf *gbe_intf = data;
2891 
2892 	p_info->tx_pipe = &gbe_intf->tx_pipe;
2893 
2894 	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2895 }
2896 
/* netcp RX hook: attach an RX hardware timestamp when enabled. */
static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
{
	struct gbe_intf *intf = data;

	return gbe_rxtstamp(intf, p_info);
}
2903 
/* netcp interface open: configure the switch-wide registers (ptype,
 * control, stats enable - re-done on every open), bring up the slave
 * port, register RX/TX hooks and take a CPTS reference.
 * Returns 0 on success or a negative errno; on failure the slave is
 * quiesced again.
 */
static int gbe_open(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_slave *slave = gbe_intf->slave;
	int port_num = slave->port_num;
	u32 reg, val;
	int ret;

	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
		GBE_RTL_VERSION(reg), GBE_IDENT(reg));

	/* For 10G and on NetCP 1.5, use directed to port */
	if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;

	/* with the ALE enabled the switch decides the egress port itself */
	if (gbe_dev->enable_ale)
		gbe_intf->tx_pipe.switch_to_port = 0;
	else
		gbe_intf->tx_pipe.switch_to_port = port_num;

	dev_dbg(gbe_dev->dev,
		"opened TX channel %s: %p with to port %d, flags %d\n",
		gbe_intf->tx_pipe.dma_chan_name,
		gbe_intf->tx_pipe.dma_channel,
		gbe_intf->tx_pipe.switch_to_port,
		gbe_intf->tx_pipe.flags);

	/* start from a quiesced slave */
	gbe_slave_stop(gbe_intf);

	/* disable priority elevation and enable statistics on all ports */
	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));

	/* Control register */
	val = GBE_CTL_P0_ENABLE;
	if (IS_SS_ID_MU(gbe_dev)) {
		val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
		netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
	}
	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));

	/* All statistics enabled and STAT AB visible by default */
	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
						    stat_port_en));

	ret = gbe_slave_open(gbe_intf);
	if (ret)
		goto fail;

	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);

	slave->open = true;
	netcp_ethss_update_link_state(gbe_dev, slave, ndev);

	gbe_register_cpts(gbe_dev);

	return 0;

fail:
	gbe_slave_stop(gbe_intf);
	return ret;
}
2970 
/* gbe_close() - netcp module "close" hook: undo gbe_open() in reverse
 * order (CPTS, slave port, then the TX/RX packet hooks) and mark the
 * slave's link state invalid.  Always returns 0.
 */
static int gbe_close(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	gbe_unregister_cpts(gbe_dev);

	gbe_slave_stop(gbe_intf);

	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);

	gbe_intf->slave->open = false;
	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
	return 0;
}
2988 
#if IS_ENABLED(CONFIG_TI_CPTS)
/* Set the default CPTS time-sync control configuration for a slave
 * port: unicast timestamping enabled, with the destination-port and
 * multicast-address match maps derived from the TS_CTL_* register
 * field definitions.
 */
static void init_slave_ts_ctl(struct gbe_slave *slave)
{
	slave->ts_ctl.uni = 1;
	slave->ts_ctl.dst_port_map =
		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
	slave->ts_ctl.maddr_map =
		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
}

#else
/* CPTS disabled: nothing to initialize */
static void init_slave_ts_ctl(struct gbe_slave *slave)
{
}
#endif /* CONFIG_TI_CPTS */
3004 
/* init_slave() - initialize one slave port descriptor from its device
 * tree node.
 *
 * Reads the "slave-port" and "link-interface" properties (defaulting
 * to SGMII MAC-PHY when the latter is absent), resolves the optional
 * PHY handle for MAC-PHY link types, computes the port/EMAC register
 * base addresses for the detected subsystem flavor (1.4, NU or XGBE)
 * and fills in the per-slave register offset tables used by the
 * GBE_REG_ADDR() accessors.
 *
 * Returns 0 on success, -EINVAL if "slave-port" is missing or the
 * subsystem version is unknown.
 */
static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
		      struct device_node *node)
{
	int port_reg_num;
	u32 port_reg_ofs, emac_reg_ofs;
	u32 port_reg_blk_sz, emac_reg_blk_sz;

	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
		return -EINVAL;
	}

	if (of_property_read_u32(node, "link-interface",
				 &slave->link_interface)) {
		dev_warn(gbe_dev->dev,
			 "missing link-interface value defaulting to 1G mac-phy link\n");
		slave->link_interface = SGMII_LINK_MAC_PHY;
	}

	slave->node = node;
	slave->open = false;
	/* a PHY handle is only meaningful for MAC-PHY link interfaces */
	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
	    (slave->link_interface == RGMII_LINK_MAC_PHY) ||
	    (slave->link_interface == XGMII_LINK_MAC_PHY))
		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);

	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
	else
		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;

	/* Emac regs memmap are contiguous but port regs are not */
	port_reg_num = slave->slave_num;
	if (IS_SS_ID_VER_14(gbe_dev)) {
		/* on 1.4, ports 2+ live in a second register window */
		if (slave->slave_num > 1) {
			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
			port_reg_num -= 2;
		} else {
			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
		}
		emac_reg_ofs = GBE13_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else if (IS_SS_ID_MU(gbe_dev)) {
		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
		emac_reg_ofs = GBENU_EMAC_OFFSET;
		port_reg_blk_sz = 0x1000;
		emac_reg_blk_sz = 0x1000;
	} else if (IS_SS_ID_XGBE(gbe_dev)) {
		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
		emac_reg_ofs = XGBE10_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else {
		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
			gbe_dev->ss_version);
		return -EINVAL;
	}

	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
				(port_reg_blk_sz * port_reg_num);
	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
				(emac_reg_blk_sz * slave->slave_num);

	if (IS_SS_ID_VER_14(gbe_dev)) {
		/* Initialize  slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);

	} else if (IS_SS_ID_MU(gbe_dev)) {
		/* Initialize  slave port register offsets */
		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);

		/* Initialize EMAC register offsets */
		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);

	} else if (IS_SS_ID_XGBE(gbe_dev)) {
		/* Initialize  slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);

	init_slave_ts_ctl(slave);
	return 0;
}
3128 
/* init_secondary_ports() - bring up "secondary" slave ports: ports
 * listed under the secondary-slave-ports DT node that get no network
 * interface of their own.
 *
 * Each child node is allocated a slave descriptor, initialized via
 * init_slave(), reset and configured, then queued on
 * gbe_dev->secondary_slaves.  If any port uses a MAC-PHY link type, a
 * dummy net_device is allocated so of_phy_connect() can attach and
 * start the PHYs of all such ports.  Failures on individual ports are
 * logged and skipped; the function itself cannot fail.
 */
static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
				port);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%pOFn), skipping...\n",
				port);
			devm_kfree(dev, slave);
			continue;
		}

		if (!IS_SS_ID_2U(gbe_dev))
			gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
		/* stop iterating once all hardware slave slots are used;
		 * drop the of_node reference the iterator holds
		 */
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(port);
			break;
		}
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	/* priv area holds only a back-pointer to gbe_dev (hence
	 * sizeof(gbe_dev), i.e. pointer size, above)
	 */
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;

	/* NOTE(review): 'slave' here is whatever the loop left it at --
	 * the last port iterated.  If that last port failed init_slave()
	 * its memory was devm_kfree'd above; confirm the DT guarantees a
	 * valid last port whenever mac_phy_link is set.
	 */
	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_RGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
		} else {
			dev_dbg(dev, "phy found: id is: 0x%s\n",
				phydev_name(slave->phy));
			phy_start(slave->phy);
		}
	}
}
3218 
3219 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3220 {
3221 	struct gbe_slave *slave;
3222 
3223 	while (!list_empty(&gbe_dev->secondary_slaves)) {
3224 		slave = first_sec_slave(gbe_dev);
3225 
3226 		if (slave->phy)
3227 			phy_disconnect(slave->phy);
3228 		list_del(&slave->slave_list);
3229 	}
3230 	if (gbe_dev->dummy_ndev)
3231 		free_netdev(gbe_dev->dummy_ndev);
3232 }
3233 
/* set_xgbe_ethss10_priv() - map and initialize the register regions of
 * a 10G (XGBE) subsystem.
 *
 * Maps the SS, switch-module and serdes windows from the DT node,
 * allocates the hw stats snapshot buffers, fills in the per-port stats
 * register pointers, ALE/CPTS bases and the register offset tables
 * used by the GBE_REG_ADDR() accessors.
 *
 * Returns 0 on success or a negative errno on address translation,
 * mapping or allocation failure (all allocations are devm-managed).
 */
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%pOFn) ss address at %d\n",
			node, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%pOFn) sm address at %d\n",
			node, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
			node, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	/* running 64-bit accumulators, one per ethtool stat */
	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	/* last raw 32-bit register snapshot, used to compute deltas */
	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
					XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_ports; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
3341 
3342 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3343 				    struct device_node *node)
3344 {
3345 	struct resource res;
3346 	void __iomem *regs;
3347 	int ret;
3348 
3349 	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3350 	if (ret) {
3351 		dev_err(gbe_dev->dev,
3352 			"Can't translate of node(%pOFn) of gbe ss address at %d\n",
3353 			node, GBE_SS_REG_INDEX);
3354 		return ret;
3355 	}
3356 
3357 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3358 	if (IS_ERR(regs)) {
3359 		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3360 		return PTR_ERR(regs);
3361 	}
3362 	gbe_dev->ss_regs = regs;
3363 	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3364 	return 0;
3365 }
3366 
/* set_gbe_ethss14_priv() - map and initialize the register regions of
 * a 1G subsystem at version 1.4 (e.g. K2HK).
 *
 * Maps the SGMII port 3/4 and switch-module windows, allocates the hw
 * stats snapshot buffers and fills in the stats/ALE/CPTS bases and the
 * register offset tables used by the GBE_REG_ADDR() accessors.
 * Assumes gbe_dev->ss_regs was already mapped by
 * get_gbe_resource_version().
 *
 * Returns 0 on success or a negative errno on address translation,
 * mapping or allocation failure (all allocations are devm-managed).
 */
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%pOFn) address at index %d\n",
			node, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%pOFn) address at index %d\n",
			node, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	/* running 64-bit accumulators, one per ethtool stat */
	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	/* last raw 32-bit register snapshot, used to compute deltas */
	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	/* K2HK has only 2 hw stats modules visible at a time, so
	 * module 0 & 2 points to one base and
	 * module 1 & 3 points to the other base
	 */
	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
	}

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
3464 
/* set_gbenu_ethss_priv() - map and initialize the register regions of
 * a NetCP 1.5 (NU / 2U) 1G subsystem.
 *
 * Sizes the ethtool stats tables for the detected variant, allocates
 * the hw stats snapshot buffers, maps the switch-module window and
 * fills in the stats/ALE/CPTS bases and the register offset tables
 * used by the GBE_REG_ADDR() accessors.  Assumes gbe_dev->ss_regs was
 * already mapped by get_gbe_resource_version().
 *
 * Returns 0 on success or a negative errno on address translation,
 * mapping or allocation failure (all allocations are devm-managed).
 */
static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = gbenu_et_stats;

	/* NU has per-slave stat blocks; 2U exposes a single port block */
	if (IS_SS_ID_MU(gbe_dev))
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
	else
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
					GBENU_ET_STATS_PORT_SIZE;

	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbenu node(%pOFn) addr at index %d\n",
			node, GBENU_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbenu switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	if (!IS_SS_ID_2U(gbe_dev))
		gbe_dev->sgmii_port_regs =
		       gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;

	/* Although sgmii modules are mem mapped to one contiguous
	 * region on GBENU devices, setting sgmii_port34_regs allows
	 * consistent code when accessing sgmii api
	 */
	/* NOTE(review): on 2U, sgmii_port_regs is left NULL (gbe_dev is
	 * zero-allocated in gbe_probe), so this pointer is an offset
	 * from NULL; presumably never dereferenced on 2U -- confirm.
	 */
	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
				     (2 * GBENU_SGMII_MODULE_SIZE);

	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;

	for (i = 0; i < (gbe_dev->max_num_ports); i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	/* ok to set for MU, but used by 2U only */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);

	/* Switch module registers */
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);

	/* Host port registers */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);

	/* For NU only.  2U does not need tx_pri_map.
	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
	 * while 2U has only 1 such thread
	 */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	return 0;
}
3561 
/* gbe_probe() - netcp module "probe" hook: instantiate one GBE/XGBE
 * ethss device from its device tree node.
 *
 * Determines the subsystem flavor from the compatible string and node
 * name, maps the register regions via the set_*_priv() helpers, opens
 * the TX pipe, counts the primary interfaces and initializes any
 * secondary slave ports, creates the ALE engine and CPTS instance,
 * resets the hardware stats modules and starts the periodic
 * netcp_ethss_timer.
 *
 * On success *inst_priv holds the new gbe_priv.  On failure a negative
 * errno is returned and secondary ports are freed; all other
 * allocations are devm-managed.
 */
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int i, ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	/* the compatible string fixes the number of slave ports */
	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
		gbe_module.set_rx_mode = gbe_set_rx_mode;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled*\n");
	}

	/* tx-queue is optional; fall back to the default queue id */
	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx_queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		return -EINVAL;
	}

	/* node name selects between 1G ("gbe") and 10G ("xgbe") setup */
	if (of_node_name_eq(node, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			return ret;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (IS_SS_ID_VER_14(gbe_dev))
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;

	} else if (of_node_name_eq(node, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			return ret;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
	} else {
		dev_err(dev, "unknown GBE node(%pOFn)\n", node);
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
				interface);
			continue;
		}
		gbe_dev->num_slaves++;
		/* stop counting once all slave slots are accounted for */
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(interface);
			break;
		}
	}
	of_node_put(interfaces);

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev,
			"No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto free_sec_ports;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev		= gbe_dev->dev;
	ale_params.ale_regs	= gbe_dev->ale_reg;
	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries	= gbe_dev->ale_entries;
	ale_params.ale_ports	= gbe_dev->ale_ports;
	if (IS_SS_ID_MU(gbe_dev)) {
		ale_params.major_ver_mask = 0x7;
		ale_params.nu_switch_ale = true;
	}
	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto free_sec_ports;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
		ret = PTR_ERR(gbe_dev->cpts);
		goto free_sec_ports;
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	/* zero the hardware stats modules under the stats lock */
	spin_lock_bh(&gbe_dev->hw_stats_lock);
	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
		if (IS_SS_ID_VER_14(gbe_dev))
			gbe_reset_mod_stats_ver14(gbe_dev, i);
		else
			gbe_reset_mod_stats(gbe_dev, i);
	}
	spin_unlock_bh(&gbe_dev->hw_stats_lock);

	/* periodic link-state / stats poll */
	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

free_sec_ports:
	free_secondary_ports(gbe_dev);
	return ret;
}
3752 
3753 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3754 		      struct device_node *node, void **intf_priv)
3755 {
3756 	struct gbe_priv *gbe_dev = inst_priv;
3757 	struct gbe_intf *gbe_intf;
3758 	int ret;
3759 
3760 	if (!node) {
3761 		dev_err(gbe_dev->dev, "interface node not available\n");
3762 		return -ENODEV;
3763 	}
3764 
3765 	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3766 	if (!gbe_intf)
3767 		return -ENOMEM;
3768 
3769 	gbe_intf->ndev = ndev;
3770 	gbe_intf->dev = gbe_dev->dev;
3771 	gbe_intf->gbe_dev = gbe_dev;
3772 
3773 	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3774 					sizeof(*gbe_intf->slave),
3775 					GFP_KERNEL);
3776 	if (!gbe_intf->slave) {
3777 		ret = -ENOMEM;
3778 		goto fail;
3779 	}
3780 
3781 	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3782 		ret = -ENODEV;
3783 		goto fail;
3784 	}
3785 
3786 	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3787 	ndev->ethtool_ops = &keystone_ethtool_ops;
3788 	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3789 	*intf_priv = gbe_intf;
3790 	return 0;
3791 
3792 fail:
3793 	if (gbe_intf->slave)
3794 		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3795 	if (gbe_intf)
3796 		devm_kfree(gbe_dev->dev, gbe_intf);
3797 	return ret;
3798 }
3799 
3800 static int gbe_release(void *intf_priv)
3801 {
3802 	struct gbe_intf *gbe_intf = intf_priv;
3803 
3804 	gbe_intf->ndev->ethtool_ops = NULL;
3805 	list_del(&gbe_intf->gbe_intf_list);
3806 	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3807 	devm_kfree(gbe_intf->dev, gbe_intf);
3808 	return 0;
3809 }
3810 
/* gbe_remove() - netcp module "remove" hook: stop the periodic timer,
 * release CPTS, stop the ALE engine, close the TX pipe and free any
 * secondary slave ports.  All interfaces should already have been
 * released; warn loudly if any remain.  Always returns 0.
 */
static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;

	del_timer_sync(&gbe_dev->timer);
	cpts_release(gbe_dev->cpts);
	cpsw_ale_stop(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(gbe_dev->dev,
			  "unreleased ethss interfaces present\n");

	return 0;
}
3827 
/* netcp module descriptor for the 1G (GBE) subsystem, registered at
 * module init.  Note: gbe_probe() assigns .set_rx_mode on "gbe-2"
 * devices before this definition, so a forward declaration must exist
 * earlier in the file.
 */
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};
3844 
/* netcp module descriptor for the 10G (XGBE) subsystem; shares every
 * callback with the 1G gbe_module and differs only in name.
 */
static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};
3861 
3862 static int __init keystone_gbe_init(void)
3863 {
3864 	int ret;
3865 
3866 	ret = netcp_register_module(&gbe_module);
3867 	if (ret)
3868 		return ret;
3869 
3870 	ret = netcp_register_module(&xgbe_module);
3871 	if (ret)
3872 		return ret;
3873 
3874 	return 0;
3875 }
3876 module_init(keystone_gbe_init);
3877 
/* Module exit: unregister both netcp module descriptors. */
static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);
3884 
3885 MODULE_LICENSE("GPL v2");
3886 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3887 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
3888