/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#ifndef MTK_ETH_H
#define MTK_ETH_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/phylink.h>
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"

#define MTK_MAX_DSA_PORTS	7
#define MTK_DSA_PORT_MASK	GENMASK(2, 0)

#define MTK_QDMA_NUM_QUEUES	16
#define MTK_QDMA_PAGE_SIZE	2048
#define MTK_MAX_RX_LENGTH	1536
#define MTK_MAX_RX_LENGTH_2K	2048
#define MTK_TX_DMA_BUF_LEN	0x3fff
#define MTK_TX_DMA_BUF_LEN_V2	0xffff
#define MTK_QDMA_RING_SIZE	2048
#define MTK_DMA_SIZE		512
#define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC	0xffffffff
#define MTK_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR)
#define MTK_HW_FEATURES		(NETIF_F_IP_CSUM | \
				 NETIF_F_RXCSUM | \
				 NETIF_F_HW_VLAN_CTAG_TX | \
				 NETIF_F_SG | NETIF_F_TSO | \
				 NETIF_F_TSO6 | \
				 NETIF_F_IPV6_CSUM |\
				 NETIF_F_HW_TC)
#define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))

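/* Usage sketch (illustrative, not part of the driver): NEXT_DESP_IDX() relies
 * on the ring size being a power of two (as MTK_DMA_SIZE and
 * MTK_QDMA_RING_SIZE are), so the wrap-around is a simple AND mask rather
 * than a modulo. Assuming a local index and a ring of MTK_DMA_SIZE entries:
 *
 *	int idx = 510;
 *
 *	idx = NEXT_DESP_IDX(idx, MTK_DMA_SIZE);		// 511
 *	idx = NEXT_DESP_IDX(idx, MTK_DMA_SIZE);		// wraps back to 0
 */
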
#define MTK_PP_HEADROOM		XDP_PACKET_HEADROOM
#define MTK_PP_PAD		(MTK_PP_HEADROOM + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MTK_PP_MAX_BUF_SIZE	(PAGE_SIZE - MTK_PP_PAD)

#define MTK_QRX_OFFSET		0x10

#define MTK_MAX_RX_RING_NUM	4
#define MTK_HW_LRO_DMA_SIZE	8

#define	MTK_MAX_LRO_RX_LENGTH		(4096 * 3)
#define	MTK_MAX_LRO_IP_CNT		2
#define	MTK_HW_LRO_TIMER_UNIT		1	/* 20 us */
#define	MTK_HW_LRO_REFRESH_TIME		50000	/* 1 sec. */
#define	MTK_HW_LRO_AGG_TIME		10	/* 200us */
#define	MTK_HW_LRO_AGE_TIME		50	/* 1ms */
#define	MTK_HW_LRO_MAX_AGG_CNT		64
#define	MTK_HW_LRO_BW_THRE		3000
#define	MTK_HW_LRO_REPLACE_DELTA	1000
#define	MTK_HW_LRO_SDL_REMAIN_ROOM	1522

/* Frame Engine Global Configuration */
#define MTK_FE_GLO_CFG		0x00
#define MTK_FE_LINK_DOWN_P3	BIT(11)
#define MTK_FE_LINK_DOWN_P4	BIT(12)

/* Frame Engine Global Reset Register */
#define MTK_RST_GL		0x04
#define RST_GL_PSE		BIT(0)

/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2		0x08
#define MTK_FE_INT_ENABLE	0x0c
#define MTK_FE_INT_FQ_EMPTY	BIT(8)
#define MTK_FE_INT_TSO_FAIL	BIT(12)
#define MTK_FE_INT_TSO_ILLEGAL	BIT(13)
#define MTK_FE_INT_TSO_ALIGN	BIT(14)
#define MTK_FE_INT_RFIFO_OV	BIT(18)
#define MTK_FE_INT_RFIFO_UF	BIT(19)
#define MTK_GDM1_AF		BIT(28)
#define MTK_GDM2_AF		BIT(29)

/* PDMA HW LRO Alter Flow Timer Register */
#define MTK_PDMA_LRO_ALT_REFRESH_TIMER	0x1c

/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP		0x20

/* CDMQ Ingress Control Register */
#define MTK_CDMQ_IG_CTRL	0x1400
#define MTK_CDMQ_STAG_EN	BIT(0)

/* CDMQ Egress Control Register */
#define MTK_CDMQ_EG_CTRL	0x1404

/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL	0x400
#define MTK_CDMP_STAG_EN	BIT(0)

/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL	0x404

/* GDM Egress Control Register */
#define MTK_GDMA_FWD_CFG(x)	(0x500 + (x * 0x1000))
#define MTK_GDMA_SPECIAL_TAG	BIT(24)
#define MTK_GDMA_ICS_EN		BIT(22)
#define MTK_GDMA_TCS_EN		BIT(21)
#define MTK_GDMA_UCS_EN		BIT(20)
#define MTK_GDMA_TO_PDMA	0x0
#define MTK_GDMA_DROP_ALL       0x7777

/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x)	(0x508 + (x * 0x1000))

/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x)	(0x50C + (x * 0x1000))

/* FE global misc reg */
#define MTK_FE_GLO_MISC         0x124

/* PSE Free Queue Flow Control */
#define PSE_FQFC_CFG1		0x100
#define PSE_FQFC_CFG2		0x104
#define PSE_DROP_CFG		0x108
#define PSE_PPE0_DROP		0x110

/* PSE Input Queue Reservation Register */
#define PSE_IQ_REV(x)		(0x140 + (((x) - 1) << 2))

/* PSE Output Queue Threshold Register */
#define PSE_OQ_TH(x)		(0x160 + (((x) - 1) << 2))

/* GDM and CDM Threshold */
#define MTK_GDM2_THRES		0x1530
#define MTK_CDMW0_THRES		0x164c
#define MTK_CDMW1_THRES		0x1650
#define MTK_CDME0_THRES		0x1654
#define MTK_CDME1_THRES		0x1658
#define MTK_CDMM_THRES		0x165c

/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0	0x980
#define MTK_LRO_EN			BIT(0)
#define MTK_L3_CKS_UPD_EN		BIT(7)
#define MTK_L3_CKS_UPD_EN_V2		BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE	BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ	(0x7 << 26)
#define MTK_LRO_RING_RELINQUISH_REQ_V2	(0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE	(0x7 << 29)
#define MTK_LRO_RING_RELINQUISH_DONE_V2	(0xf << 28)

#define MTK_PDMA_LRO_CTRL_DW1	0x984
#define MTK_PDMA_LRO_CTRL_DW2	0x988
#define MTK_PDMA_LRO_CTRL_DW3	0x98c
#define MTK_ADMA_MODE		BIT(15)
#define MTK_LRO_MIN_RXD_SDL	(MTK_HW_LRO_SDL_REMAIN_ROOM << 16)

#define MTK_RX_DMA_LRO_EN	BIT(8)
#define MTK_MULTI_EN		BIT(10)
#define MTK_PDMA_SIZE_8DWORDS	(1 << 4)

/* PDMA Global Configuration Register */
#define MTK_PDMA_LRO_SDL	0x3000
#define MTK_RX_CFG_SDL_OFFSET	16

/* PDMA Reset Index Register */
#define MTK_PST_DRX_IDX0	BIT(16)
#define MTK_PST_DRX_IDX_CFG(x)	(MTK_PST_DRX_IDX0 << (x))

/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_RX_MASK		GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN		BIT(15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
#define MTK_PDMA_DELAY_RX_PTIME_SHIFT	0

#define MTK_PDMA_DELAY_TX_MASK		GENMASK(31, 16)
#define MTK_PDMA_DELAY_TX_EN		BIT(31)
#define MTK_PDMA_DELAY_TX_PINT_SHIFT	24
#define MTK_PDMA_DELAY_TX_PTIME_SHIFT	16

#define MTK_PDMA_DELAY_PINT_MASK	0x7f
#define MTK_PDMA_DELAY_PTIME_MASK	0xff

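/* Illustrative sketch (not taken verbatim from the driver): the RX half of
 * the delay-interrupt register packs an enable bit, a pending-packet
 * threshold (PINT) and a timeout in hardware time units (PTIME). Assuming
 * local "pkts" and "time" values already clamped by the caller:
 *
 *	u32 dly = MTK_PDMA_DELAY_RX_EN |
 *		  ((pkts & MTK_PDMA_DELAY_PINT_MASK) <<
 *		   MTK_PDMA_DELAY_RX_PINT_SHIFT) |
 *		  ((time & MTK_PDMA_DELAY_PTIME_MASK) <<
 *		   MTK_PDMA_DELAY_RX_PTIME_SHIFT);
 *
 * The TX half is packed the same way into the upper 16 bits using the
 * *_TX_* shifts.
 */
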
/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA	0xa4c

/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0	0xb04
#define MTK_LRO_DIP_DW0_CFG(x)		(MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
#define MTK_RING_MYIP_VLD		BIT(9)

/* PDMA HW LRO Ring Control Registers */
#define MTK_LRO_RX_RING0_CTRL_DW1	0xb28
#define MTK_LRO_RX_RING0_CTRL_DW2	0xb2c
#define MTK_LRO_RX_RING0_CTRL_DW3	0xb30
#define MTK_LRO_CTRL_DW1_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
#define MTK_LRO_CTRL_DW2_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
#define MTK_LRO_CTRL_DW3_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
#define MTK_RING_AGE_TIME_L		((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
#define MTK_RING_AGE_TIME_H		((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
#define MTK_RING_AUTO_LERAN_MODE	(3 << 6)
#define MTK_RING_VLD			BIT(8)
#define MTK_RING_MAX_AGG_TIME		((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
#define MTK_RING_MAX_AGG_CNT_L		((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
#define MTK_RING_MAX_AGG_CNT_H		((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)

/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_OFFSET		0x10
#define QDMA_RES_THRES		4

/* QDMA Tx Queue Scheduler Configuration Registers */
#define MTK_QTX_SCH_TX_SEL		BIT(31)
#define MTK_QTX_SCH_TX_SEL_V2		GENMASK(31, 30)

#define MTK_QTX_SCH_LEAKY_BUCKET_EN	BIT(30)
#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE	GENMASK(29, 28)
#define MTK_QTX_SCH_MIN_RATE_EN		BIT(27)
#define MTK_QTX_SCH_MIN_RATE_MAN	GENMASK(26, 20)
#define MTK_QTX_SCH_MIN_RATE_EXP	GENMASK(19, 16)
#define MTK_QTX_SCH_MAX_RATE_WEIGHT	GENMASK(15, 12)
#define MTK_QTX_SCH_MAX_RATE_EN		BIT(11)
#define MTK_QTX_SCH_MAX_RATE_MAN	GENMASK(10, 4)
#define MTK_QTX_SCH_MAX_RATE_EXP	GENMASK(3, 0)

/* QDMA TX Scheduler Rate Control Register */
#define MTK_QDMA_TX_SCH_MAX_WFQ		BIT(15)

/* QDMA Global Configuration Register */
#define MTK_RX_2B_OFFSET	BIT(31)
#define MTK_RX_BT_32DWORDS	(3 << 11)
#define MTK_NDP_CO_PRO		BIT(10)
#define MTK_TX_WB_DDONE		BIT(6)
#define MTK_TX_BT_32DWORDS	(3 << 4)
#define MTK_RX_DMA_BUSY		BIT(3)
#define MTK_TX_DMA_BUSY		BIT(1)
#define MTK_RX_DMA_EN		BIT(2)
#define MTK_TX_DMA_EN		BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US	1000000

/* QDMA V2 Global Configuration Register */
#define MTK_CHK_DDONE_EN	BIT(28)
#define MTK_DMAD_WR_WDONE	BIT(26)
#define MTK_WCOMP_EN		BIT(24)
#define MTK_RESV_BUF		(0x40 << 16)
#define MTK_MUTLI_CNT		(0x4 << 12)
#define MTK_LEAKY_BUCKET_EN	BIT(11)

/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE	BIT(20)
#define FC_THRES_DROP_EN	(7 << 16)
#define FC_THRES_MIN		0x4444

/* QDMA Interrupt Status Register */
#define MTK_RX_DONE_DLY		BIT(30)
#define MTK_TX_DONE_DLY		BIT(28)
#define MTK_RX_DONE_INT3	BIT(19)
#define MTK_RX_DONE_INT2	BIT(18)
#define MTK_RX_DONE_INT1	BIT(17)
#define MTK_RX_DONE_INT0	BIT(16)
#define MTK_TX_DONE_INT3	BIT(3)
#define MTK_TX_DONE_INT2	BIT(2)
#define MTK_TX_DONE_INT1	BIT(1)
#define MTK_TX_DONE_INT0	BIT(0)
#define MTK_RX_DONE_INT		MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT		MTK_TX_DONE_DLY

#define MTK_RX_DONE_INT_V2	BIT(14)

#define MTK_CDM_TXFIFO_RDY	BIT(7)

/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT	BIT(0)

#define MTK_STAT_OFFSET		0x40

/* QDMA TX NUM */
#define QID_BITS_V2(x)		(((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID	8

#define MTK_TX_DMA_BUF_SHIFT	8

/* QDMA V2 descriptor txd6 */
#define TX_DMA_INS_VLAN_V2	BIT(16)
/* QDMA V2 descriptor txd5 */
#define TX_DMA_CHKSUM_V2	(0x7 << 28)
#define TX_DMA_TSO_V2		BIT(31)

/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2	8
#define TX_DMA_FPORT_MASK_V2	0xf
#define TX_DMA_SWC_V2		BIT(30)

/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM		(0x7 << 29)
#define TX_DMA_TSO		BIT(28)
#define TX_DMA_FPORT_SHIFT	25
#define TX_DMA_FPORT_MASK	0x7
#define TX_DMA_INS_VLAN		BIT(16)

/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU	BIT(31)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_PLEN0(x)		(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(x)		((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC		BIT(14)
#define TX_DMA_PQID		GENMASK(3, 0)

/* PDMA on MT7628 */
#define TX_DMA_DONE		BIT(31)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)

/* QDMA descriptor rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG		BIT(15)

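/* Note / usage sketch (illustrative only): TX_DMA_PLEN0(), RX_DMA_PREP_PLEN0()
 * and RX_DMA_GET_PLEN0() deliberately expand to code dereferencing a local
 * "eth" pointer, so they can only be used inside functions that have a
 * "struct mtk_eth *eth" in scope; the per-SoC dma_max_len/dma_len_offset pair
 * is what differs between the v1 and v2 descriptor layouts. Assuming such a
 * function, a descriptor pointer "txd"/"rxd" and a mapped head fragment of
 * length "len":
 *
 *	txd->txd3 = TX_DMA_PLEN0(len) | TX_DMA_LS0;
 *	...
 *	len = RX_DMA_GET_PLEN0(READ_ONCE(rxd->rxd2));
 */
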
/* QDMA descriptor rxd3 */
#define RX_DMA_VID(x)		((x) & VLAN_VID_MASK)
#define RX_DMA_TCI(x)		((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
#define RX_DMA_VPID(x)		(((x) >> 16) & 0xffff)

/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY	GENMASK(13, 0)
#define MTK_RXD4_PPE_CPU_REASON	GENMASK(18, 14)
#define MTK_RXD4_SRC_PORT	GENMASK(21, 19)
#define MTK_RXD4_ALG		GENMASK(31, 22)

/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID		BIT(24)
#define RX_DMA_L4_VALID_PDMA	BIT(30)		/* when PDMA is used */
#define RX_DMA_SPECIAL_TAG	BIT(22)

/* PDMA descriptor rxd5 */
#define MTK_RXD5_FOE_ENTRY	GENMASK(14, 0)
#define MTK_RXD5_PPE_CPU_REASON	GENMASK(22, 18)
#define MTK_RXD5_SRC_PORT	GENMASK(29, 26)

#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0x7)
#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0xf)

/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2		BIT(0)
#define RX_DMA_L4_VALID_V2	BIT(2)

/* PHY Polling and SMI Master Control registers */
#define MTK_PPSC		0x10000
#define PPSC_MDC_CFG		GENMASK(29, 24)
#define PPSC_MDC_TURBO		BIT(20)
#define MDC_MAX_FREQ		25000000
#define MDC_MAX_DIVIDER		63

/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC		0x10004
#define PHY_IAC_ACCESS		BIT(31)
#define PHY_IAC_REG_MASK	GENMASK(29, 25)
#define PHY_IAC_REG(x)		FIELD_PREP(PHY_IAC_REG_MASK, (x))
#define PHY_IAC_ADDR_MASK	GENMASK(24, 20)
#define PHY_IAC_ADDR(x)		FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
#define PHY_IAC_CMD_MASK	GENMASK(19, 18)
#define PHY_IAC_CMD_C45_ADDR	FIELD_PREP(PHY_IAC_CMD_MASK, 0)
#define PHY_IAC_CMD_WRITE	FIELD_PREP(PHY_IAC_CMD_MASK, 1)
#define PHY_IAC_CMD_C22_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 2)
#define PHY_IAC_CMD_C45_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 3)
#define PHY_IAC_START_MASK	GENMASK(17, 16)
#define PHY_IAC_START_C45	FIELD_PREP(PHY_IAC_START_MASK, 0)
#define PHY_IAC_START_C22	FIELD_PREP(PHY_IAC_START_MASK, 1)
#define PHY_IAC_DATA_MASK	GENMASK(15, 0)
#define PHY_IAC_DATA(x)		FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT		HZ

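/* Illustrative sketch of an indirect clause-22 PHY read through MTK_PHY_IAC
 * (a simplified version of what the MDIO ops in the .c file do; the busy-wait
 * helper name here is only a placeholder): the access is started by writing a
 * single command word and completes once PHY_IAC_ACCESS clears again.
 *
 *	// wait_for_iac_idle(): poll until PHY_IAC_ACCESS is 0, bounded by
 *	// PHY_IAC_TIMEOUT (hypothetical helper)
 *	if (wait_for_iac_idle(eth) < 0)
 *		return -ETIMEDOUT;
 *
 *	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C22 |
 *		PHY_IAC_CMD_C22_READ | PHY_IAC_ADDR(phy_addr) |
 *		PHY_IAC_REG(phy_reg), MTK_PHY_IAC);
 *
 *	if (wait_for_iac_idle(eth) < 0)
 *		return -ETIMEDOUT;
 *
 *	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
 */
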
#define MTK_MAC_MISC		0x1000c
#define MTK_MUX_TO_ESW		BIT(0)

/* Mac control registers */
#define MTK_MAC_MCR(x)		(0x10100 + (x * 0x100))
#define MAC_MCR_MAX_RX_MASK	GENMASK(25, 24)
#define MAC_MCR_MAX_RX(_x)	(MAC_MCR_MAX_RX_MASK & ((_x) << 24))
#define MAC_MCR_MAX_RX_1518	0x0
#define MAC_MCR_MAX_RX_1536	0x1
#define MAC_MCR_MAX_RX_1552	0x2
#define MAC_MCR_MAX_RX_2048	0x3
#define MAC_MCR_IPG_CFG		(BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE	BIT(15)
#define MAC_MCR_TX_EN		BIT(14)
#define MAC_MCR_RX_EN		BIT(13)
#define MAC_MCR_RX_FIFO_CLR_DIS	BIT(12)
#define MAC_MCR_BACKOFF_EN	BIT(9)
#define MAC_MCR_BACKPR_EN	BIT(8)
#define MAC_MCR_FORCE_RX_FC	BIT(5)
#define MAC_MCR_FORCE_TX_FC	BIT(4)
#define MAC_MCR_SPEED_1000	BIT(3)
#define MAC_MCR_SPEED_100	BIT(2)
#define MAC_MCR_FORCE_DPX	BIT(1)
#define MAC_MCR_FORCE_LINK	BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN	(MAC_MCR_FORCE_MODE)

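/* Illustrative sketch (not the driver's exact link-up code): with
 * MAC_MCR_FORCE_MODE set, speed, duplex, flow control and link state come
 * from the force bits above rather than from auto-detection. A forced
 * 1Gbit/s full-duplex link with TX/RX enabled could be composed as:
 *
 *	u32 mcr = mtk_r32(eth, MTK_MAC_MCR(mac->id));
 *
 *	mcr &= ~(MAC_MCR_SPEED_1000 | MAC_MCR_SPEED_100 | MAC_MCR_FORCE_DPX |
 *		 MAC_MCR_FORCE_TX_FC | MAC_MCR_FORCE_RX_FC);
 *	mcr |= MAC_MCR_SPEED_1000 | MAC_MCR_FORCE_DPX | MAC_MCR_TX_EN |
 *	       MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
 *	mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
 */
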
/* Mac status registers */
#define MTK_MAC_MSR(x)		(0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G		BIT(7)
#define MAC_MSR_EEE100M		BIT(6)
#define MAC_MSR_RX_FC		BIT(5)
#define MAC_MSR_TX_FC		BIT(4)
#define MAC_MSR_SPEED_1000	BIT(3)
#define MAC_MSR_SPEED_100	BIT(2)
#define MAC_MSR_SPEED_MASK	(MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
#define MAC_MSR_DPX		BIT(1)
#define MAC_MSR_LINK		BIT(0)

/* TRGMII RXC control register */
#define TRGMII_RCK_CTRL		0x10300
#define DQSI0(x)		((x << 0) & GENMASK(6, 0))
#define DQSI1(x)		((x << 8) & GENMASK(14, 8))
#define RXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define RXC_RST			BIT(31)
#define RXC_DQSISEL		BIT(30)
#define RCK_CTRL_RGMII_1000	(RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
#define RCK_CTRL_RGMII_10_100	RXCTL_DMWTLAT(2)

#define NUM_TRGMII_CTRL		5

/* TRGMII TXC control register */
#define TRGMII_TCK_CTRL		0x10340
#define TXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define TXC_INV			BIT(30)
#define TCK_CTRL_RGMII_1000	TXCTL_DMWTLAT(2)
#define TCK_CTRL_RGMII_10_100	(TXC_INV | TXCTL_DMWTLAT(2))

/* TRGMII TX Drive Strength */
#define TRGMII_TD_ODT(i)	(0x10354 + 8 * (i))
#define  TD_DM_DRVP(x)		((x) & 0xf)
#define  TD_DM_DRVN(x)		(((x) & 0xf) << 4)

/* TRGMII Interface mode register */
#define INTF_MODE		0x10390
#define TRGMII_INTF_DIS		BIT(0)
#define TRGMII_MODE		BIT(1)
#define TRGMII_CENTRAL_ALIGNED	BIT(2)
#define INTF_MODE_RGMII_1000    (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100  0

/* GPIO port control registers for GMAC 2 */
#define GPIO_OD33_CTRL8		0x4c0
#define GPIO_BIAS_CTRL		0xed0
#define GPIO_DRV_SEL10		0xf00

/* ethernet subsystem chip id register */
#define ETHSYS_CHIPID0_3	0x0
#define ETHSYS_CHIPID4_7	0x4
#define MT7623_ETH		7623
#define MT7622_ETH		7622
#define MT7621_ETH		7621

/* ethernet system control register */
#define ETHSYS_SYSCFG		0x10
#define SYSCFG_DRAM_TYPE_DDR2	BIT(4)

/* ethernet subsystem config register */
#define ETHSYS_SYSCFG0		0x14
#define SYSCFG0_GE_MASK		0x3
#define SYSCFG0_GE_MODE(x, y)	(x << (12 + (y * 2)))
#define SYSCFG0_SGMII_MASK     GENMASK(9, 8)
#define SYSCFG0_SGMII_GMAC1    ((2 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2    ((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)

/* ethernet subsystem clock register */
#define ETHSYS_CLKCFG0		0x2c
#define ETHSYS_TRGMII_CLK_SEL362_5	BIT(11)
#define ETHSYS_TRGMII_MT7621_MASK	(BIT(5) | BIT(6))
#define ETHSYS_TRGMII_MT7621_APLL	BIT(6)
#define ETHSYS_TRGMII_MT7621_DDR_PLL	BIT(5)

/* ethernet reset control register */
#define ETHSYS_RSTCTRL			0x34
#define RSTCTRL_FE			BIT(6)
#define RSTCTRL_PPE0			BIT(31)
#define RSTCTRL_PPE0_V2			BIT(30)
#define RSTCTRL_PPE1			BIT(31)
#define RSTCTRL_ETH			BIT(23)

/* ethernet reset check idle register */
#define ETHSYS_FE_RST_CHK_IDLE_EN	0x28

/* ethernet dma channel agent map */
#define ETHSYS_DMA_AG_MAP	0x408
#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)

/* Infrasys subsystem config registers */
#define INFRA_MISC2            0x70c
#define CO_QPHY_SEL            BIT(0)
#define GEPHY_MAC_SEL          BIT(1)

/* Top misc registers */
#define USB_PHY_SWITCH_REG	0x218
#define QPHY_SEL_MASK		GENMASK(1, 0)
#define SGMII_QPHY_SEL		0x2

/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET	0x0800
#define MT7628_SDM_OFFSET	0x0c00

#define MT7628_TX_BASE_PTR0	(MT7628_PDMA_OFFSET + 0x00)
#define MT7628_TX_MAX_CNT0	(MT7628_PDMA_OFFSET + 0x04)
#define MT7628_TX_CTX_IDX0	(MT7628_PDMA_OFFSET + 0x08)
#define MT7628_TX_DTX_IDX0	(MT7628_PDMA_OFFSET + 0x0c)
#define MT7628_PST_DTX_IDX0	BIT(0)

#define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)

/* Counter / stat register */
#define MT7628_SDM_TPCNT	(MT7628_SDM_OFFSET + 0x100)
#define MT7628_SDM_TBCNT	(MT7628_SDM_OFFSET + 0x104)
#define MT7628_SDM_RPCNT	(MT7628_SDM_OFFSET + 0x108)
#define MT7628_SDM_RBCNT	(MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR	(MT7628_SDM_OFFSET + 0x110)

#define MTK_FE_CDM1_FSM		0x220
#define MTK_FE_CDM2_FSM		0x224
#define MTK_FE_CDM3_FSM		0x238
#define MTK_FE_CDM4_FSM		0x298
#define MTK_FE_CDM5_FSM		0x318
#define MTK_FE_CDM6_FSM		0x328
#define MTK_FE_GDM1_FSM		0x228
#define MTK_FE_GDM2_FSM		0x22C

#define MTK_MAC_FSM(x)		(0x1010C + ((x) * 0x100))

struct mtk_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

struct mtk_rx_dma_v2 {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
	unsigned int rxd5;
	unsigned int rxd6;
	unsigned int rxd7;
	unsigned int rxd8;
} __packed __aligned(4);

struct mtk_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

struct mtk_tx_dma_v2 {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
	unsigned int txd5;
	unsigned int txd6;
	unsigned int txd7;
	unsigned int txd8;
} __packed __aligned(4);

struct mtk_eth;
struct mtk_mac;

struct mtk_xdp_stats {
	u64 rx_xdp_redirect;
	u64 rx_xdp_pass;
	u64 rx_xdp_drop;
	u64 rx_xdp_tx;
	u64 rx_xdp_tx_errors;
	u64 tx_xdp_xmit;
	u64 tx_xdp_xmit_errors;
};

/* struct mtk_hw_stats - the structure that holds the traffic statistics.
 * @stats_lock:		make sure that stats operations are atomic
 * @reg_offset:		the status register offset of the SoC
 * @syncp:		the u64 stats sync point that guarantees consistent
 *			reads of the 64bit counters
 *
 * All of the supported SoCs have hardware counters for traffic statistics.
 * Whenever the status IRQ triggers we can read the latest stats from these
 * counters and store them in this struct.
 */
struct mtk_hw_stats {
	u64 tx_bytes;
	u64 tx_packets;
	u64 tx_skip;
	u64 tx_collisions;
	u64 rx_bytes;
	u64 rx_packets;
	u64 rx_overflow;
	u64 rx_fcs_errors;
	u64 rx_short_errors;
	u64 rx_long_errors;
	u64 rx_checksum_errors;
	u64 rx_flow_control_packets;

	struct mtk_xdp_stats	xdp_stats;

	spinlock_t		stats_lock;
	u32			reg_offset;
	struct u64_stats_sync	syncp;
};

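/* Reader sketch (illustrative): the 64bit counters above are written under
 * @stats_lock and published through @syncp, so a consumer such as a
 * ndo_get_stats64 implementation is expected to use the u64_stats retry loop:
 *
 *	unsigned int start;
 *	u64 tx_bytes, tx_packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&hw_stats->syncp);
 *		tx_bytes = hw_stats->tx_bytes;
 *		tx_packets = hw_stats->tx_packets;
 *	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
 */
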
enum mtk_tx_flags {
	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
	 * track how memory was allocated so that it can be freed properly.
	 */
	MTK_TX_FLAGS_SINGLE0	= 0x01,
	MTK_TX_FLAGS_PAGE0	= 0x02,

	/* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
	 * SKB went out on, instead of looking it up through the hardware
	 * TX descriptor.
	 */
	MTK_TX_FLAGS_FPORT0	= 0x04,
	MTK_TX_FLAGS_FPORT1	= 0x08,
};

/* This enum defines the position of each clock in the clock array required
 * by a given SoC.
 */
enum mtk_clks_map {
	MTK_CLK_ETHIF,
	MTK_CLK_SGMIITOP,
	MTK_CLK_ESW,
	MTK_CLK_GP0,
	MTK_CLK_GP1,
	MTK_CLK_GP2,
	MTK_CLK_FE,
	MTK_CLK_TRGPLL,
	MTK_CLK_SGMII_TX_250M,
	MTK_CLK_SGMII_RX_250M,
	MTK_CLK_SGMII_CDR_REF,
	MTK_CLK_SGMII_CDR_FB,
	MTK_CLK_SGMII2_TX_250M,
	MTK_CLK_SGMII2_RX_250M,
	MTK_CLK_SGMII2_CDR_REF,
	MTK_CLK_SGMII2_CDR_FB,
	MTK_CLK_SGMII_CK,
	MTK_CLK_ETH2PLL,
	MTK_CLK_WOCPU0,
	MTK_CLK_WOCPU1,
	MTK_CLK_NETSYS0,
	MTK_CLK_NETSYS1,
	MTK_CLK_MAX
};

#define MT7623_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) |  \
				 BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
				 BIT(MTK_CLK_TRGPLL))
#define MT7622_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) |  \
				 BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_GP2) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII_CK) | \
				 BIT(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP	(0)
#define MT7628_CLKS_BITMAP	(0)
#define MT7629_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) |  \
				 BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII2_TX_250M) | \
				 BIT(MTK_CLK_SGMII2_RX_250M) | \
				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT(MTK_CLK_SGMII_CK) | \
				 BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
#define MT7981_CLKS_BITMAP	(BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_WOCPU0) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII2_TX_250M) | \
				 BIT(MTK_CLK_SGMII2_RX_250M) | \
				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT(MTK_CLK_SGMII_CK))
#define MT7986_CLKS_BITMAP	(BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII2_TX_250M) | \
				 BIT(MTK_CLK_SGMII2_RX_250M) | \
				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT(MTK_CLK_SGMII2_CDR_FB))

enum mtk_dev_state {
	MTK_HW_INIT,
	MTK_RESETTING
};

enum mtk_tx_buf_type {
	MTK_TYPE_SKB,
	MTK_TYPE_XDP_TX,
	MTK_TYPE_XDP_NDO,
};

/* struct mtk_tx_buf -	This struct holds the pointers to the memory pointed at
 *			by the TX descriptors
 * @data:		The SKB or XDP frame pointer of the packet being sent
 * @dma_addr0:		The base addr of the first segment
 * @dma_len0:		The length of the first segment
 * @dma_addr1:		The base addr of the second segment
 * @dma_len1:		The length of the second segment
 */
struct mtk_tx_buf {
	enum mtk_tx_buf_type type;
	void *data;

	u32 flags;
	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
	DEFINE_DMA_UNMAP_LEN(dma_len0);
	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
	DEFINE_DMA_UNMAP_LEN(dma_len1);
};

/* struct mtk_tx_ring -	This struct holds info describing a TX ring
 * @dma:		The descriptor ring
 * @buf:		The memory pointed at by the ring
 * @phys:		The physical addr of the descriptor ring
 * @next_free:		Pointer to the next free descriptor
 * @last_free:		Pointer to the last free descriptor
 * @last_free_ptr:	Hardware pointer value of the last free descriptor
 * @thresh:		The threshold of minimum amount of free descriptors
 * @free_count:		QDMA uses a linked list. Track how many free descriptors
 *			are present
 */
struct mtk_tx_ring {
	void *dma;
	struct mtk_tx_buf *buf;
	dma_addr_t phys;
	struct mtk_tx_dma *next_free;
	struct mtk_tx_dma *last_free;
	u32 last_free_ptr;
	u16 thresh;
	atomic_t free_count;
	int dma_size;
	struct mtk_tx_dma *dma_pdma;	/* For MT7628/88 PDMA handling */
	dma_addr_t phys_pdma;
	int cpu_idx;
};

/* PDMA rx ring mode */
enum mtk_rx_flags {
	MTK_RX_FLAGS_NORMAL = 0,
	MTK_RX_FLAGS_HWLRO,
	MTK_RX_FLAGS_QDMA,
};

/* struct mtk_rx_ring -	This struct holds info describing a RX ring
 * @dma:		The descriptor ring
 * @data:		The memory pointed at by the ring
 * @phys:		The physical addr of the descriptor ring
 * @frag_size:		How big can each fragment be
 * @buf_size:		The size of each packet buffer
 * @calc_idx:		The current head of ring
 */
struct mtk_rx_ring {
	void *dma;
	u8 **data;
	dma_addr_t phys;
	u16 frag_size;
	u16 buf_size;
	u16 dma_size;
	bool calc_idx_update;
	u16 calc_idx;
	u32 crx_idx_reg;
	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_q;
};

enum mkt_eth_capabilities {
	MTK_RGMII_BIT = 0,
	MTK_TRGMII_BIT,
	MTK_SGMII_BIT,
	MTK_ESW_BIT,
	MTK_GEPHY_BIT,
	MTK_MUX_BIT,
	MTK_INFRA_BIT,
	MTK_SHARED_SGMII_BIT,
	MTK_HWLRO_BIT,
	MTK_SHARED_INT_BIT,
	MTK_TRGMII_MT7621_CLK_BIT,
	MTK_QDMA_BIT,
	MTK_SOC_MT7628_BIT,
	MTK_RSTCTRL_PPE1_BIT,
	MTK_U3_COPHY_V2_BIT,

	/* MUX BITS */
	MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
	MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
	MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
	MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
	MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,

	/* PATH BITS */
	MTK_ETH_PATH_GMAC1_RGMII_BIT,
	MTK_ETH_PATH_GMAC1_TRGMII_BIT,
	MTK_ETH_PATH_GMAC1_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_RGMII_BIT,
	MTK_ETH_PATH_GMAC2_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_GEPHY_BIT,
	MTK_ETH_PATH_GDM1_ESW_BIT,
};

/* Supported hardware group on SoCs */
#define MTK_RGMII		BIT(MTK_RGMII_BIT)
#define MTK_TRGMII		BIT(MTK_TRGMII_BIT)
#define MTK_SGMII		BIT(MTK_SGMII_BIT)
#define MTK_ESW			BIT(MTK_ESW_BIT)
#define MTK_GEPHY		BIT(MTK_GEPHY_BIT)
#define MTK_MUX			BIT(MTK_MUX_BIT)
#define MTK_INFRA		BIT(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII	BIT(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO		BIT(MTK_HWLRO_BIT)
#define MTK_SHARED_INT		BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK	BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA		BIT(MTK_QDMA_BIT)
#define MTK_SOC_MT7628		BIT(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1	BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2		BIT(MTK_U3_COPHY_V2_BIT)

#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW		\
	BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY	\
	BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY		\
	BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII	\
	BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII	\
	BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)

/* Supported path present on SoCs */
#define MTK_ETH_PATH_GMAC1_RGMII	BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII	BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII	BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII	BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII	BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY	BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW		BIT(MTK_ETH_PATH_GDM1_ESW_BIT)

#define MTK_GMAC1_RGMII		(MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII	(MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
#define MTK_GMAC1_SGMII		(MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
#define MTK_GMAC2_RGMII		(MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII		(MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY		(MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
#define MTK_GDM1_ESW		(MTK_ETH_PATH_GDM1_ESW | MTK_ESW)

/* MUXes present on SoCs */
/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)

/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY    \
	(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)

/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
#define MTK_MUX_U3_GMAC2_TO_QPHY        \
	(MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)

/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII      \
	(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
	MTK_SHARED_SGMII)

/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII   \
	(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)

#define MTK_HAS_CAPS(caps, _x)		(((caps) & (_x)) == (_x))

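/* Usage sketch (illustrative): a capability is considered present only if
 * *all* of its bits are set, so composite flags such as MTK_GMAC1_SGMII
 * (path bit plus MTK_SGMII) work as a single check:
 *
 *	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 *		;	// QDMA-based TX path
 *	else
 *		;	// PDMA-only TX path (MT7628/88)
 */
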
#define MT7621_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
		      MTK_GMAC2_RGMII | MTK_SHARED_INT | \
		      MTK_TRGMII_MT7621_CLK | MTK_QDMA)

#define MT7622_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
		      MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
		      MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)

#define MT7623_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
		      MTK_QDMA)

#define MT7628_CAPS  (MTK_SHARED_INT | MTK_SOC_MT7628)

#define MT7629_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)

#define MT7981_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
		      MTK_RSTCTRL_PPE1)

#define MT7986_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_RSTCTRL_PPE1)

struct mtk_tx_dma_desc_info {
	dma_addr_t	addr;
	u32		size;
	u16		vlan_tci;
	u16		qid;
	u8		gso:1;
	u8		csum:1;
	u8		vlan:1;
	u8		first:1;
	u8		last:1;
};

struct mtk_reg_map {
	u32	tx_irq_mask;
	u32	tx_irq_status;
	struct {
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	pcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	irq_status;	/* interrupt status */
		u32	irq_mask;	/* interrupt mask */
		u32	adma_rx_dbg0;
		u32	int_grp;
	} pdma;
	struct {
		u32	qtx_cfg;	/* tx queue configuration */
		u32	qtx_sch;	/* tx queue scheduler configuration */
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	qcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	fc_th;		/* flow control */
		u32	int_grp;
		u32	hred;		/* interrupt mask */
		u32	ctx_ptr;	/* tx acquire cpu pointer */
		u32	dtx_ptr;	/* tx acquire dma pointer */
		u32	crx_ptr;	/* tx release cpu pointer */
		u32	drx_ptr;	/* tx release dma pointer */
		u32	fq_head;	/* fq head pointer */
		u32	fq_tail;	/* fq tail pointer */
		u32	fq_count;	/* fq free page count */
		u32	fq_blen;	/* fq free page buffer length */
		u32	tx_sch_rate;	/* tx scheduler rate control registers */
	} qdma;
	u32	gdm1_cnt;
	u32	gdma_to_ppe;
	u32	ppe_base;
	u32	wdma_base[2];
	u32	pse_iq_sta;
	u32	pse_oq_sta;
};

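/* Usage sketch (illustrative): register offsets that moved between SoC
 * generations are looked up through the per-SoC map instead of being
 * hard-coded, e.g. to read and acknowledge the PDMA interrupt status:
 *
 *	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 *	u32 status = mtk_r32(eth, reg_map->pdma.irq_status);
 *
 *	mtk_w32(eth, status, reg_map->pdma.irq_status);
 */
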
/* struct mtk_soc_data -	This is the structure holding all differences
 *				among various platforms
 * @reg_map:			SoC register map.
 * @ana_rgc3:                   The offset for register ANA_RGC3 related to
 *				sgmiisys syscon
 * @caps:			Flags showing the extra capabilities of the SoC
 * @hw_features:		Flags showing the supported HW features
 * @required_clks:		Bitmap of the clocks required on the target SoC
 * @required_pctl:		A bool value to show whether the SoC requires
 *				the extra setup for those pins used by GMAC.
 * @hash_offset:		Flow table hash offset.
 * @version:			SoC version.
 * @foe_entry_size:		Foe table entry size.
 * @has_accounting:		Bool indicating support for accounting of
 *				offloaded flows.
 * @txd_size:			Tx DMA descriptor size.
 * @rxd_size:			Rx DMA descriptor size.
 * @rx_irq_done_mask:		Rx irq done register mask.
 * @rx_dma_l4_valid:		Rx DMA valid register mask.
 * @dma_max_len:		Max DMA tx/rx buffer length.
 * @dma_len_offset:		Tx/Rx DMA length field offset.
 */
struct mtk_soc_data {
	const struct mtk_reg_map *reg_map;
	u32             ana_rgc3;
	u32		caps;
	u32		required_clks;
	bool		required_pctl;
	u8		offload_version;
	u8		hash_offset;
	u8		version;
	u16		foe_entry_size;
	netdev_features_t hw_features;
	bool		has_accounting;
	bool		disable_pll_modes;
	struct {
		u32	txd_size;
		u32	rxd_size;
		u32	rx_irq_done_mask;
		u32	rx_dma_l4_valid;
		u32	dma_max_len;
		u32	dma_len_offset;
	} txrx;
};

#define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)

/* currently no SoC has more than 3 macs */
#define MTK_MAX_DEVS	3

/* struct mtk_eth -	This is the main data structure for holding the state
 *			of the driver
 * @dev:		The device pointer
 * @dma_dev:		The device pointer used for dma mapping/alloc
 * @base:		The mapped register i/o base
 * @page_lock:		Make sure that register operations are atomic
 * @tx_irq_lock:	Make sure that IRQ register operations are atomic
 * @rx_irq_lock:	Make sure that IRQ register operations are atomic
 * @dim_lock:		Make sure that Net DIM operations are atomic
 * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
 *			dummy for NAPI to work
 * @netdev:		The netdev instances
 * @mac:		Each netdev is linked to a physical MAC
 * @irq:		The IRQ that we are using
 * @msg_enable:		Ethtool msg level
 * @ethsys:		The register map pointing at the range used to setup
 *			MII modes
 * @infra:              The register map pointing at the range used to setup
 *                      SGMII and GePHY path
 * @sgmii_pcs:		Pointers to mtk-pcs-lynxi phylink_pcs instances
 * @pctl:		The register map pointing at the range used to setup
 *			GMAC port drive/slew values
 * @dma_refcnt:		track how many netdevs are using the DMA engine
 * @tx_ring:		Pointer to the memory holding info about the TX ring
 * @rx_ring:		Pointer to the memory holding info about the RX ring
 * @rx_ring_qdma:	Pointer to the memory holding info about the QDMA RX ring
 * @tx_napi:		The TX NAPI struct
 * @rx_napi:		The RX NAPI struct
 * @rx_events:		Net DIM RX event counter
 * @rx_packets:		Net DIM RX packet counter
 * @rx_bytes:		Net DIM RX byte counter
 * @rx_dim:		Net DIM RX context
 * @tx_events:		Net DIM TX event counter
 * @tx_packets:		Net DIM TX packet counter
 * @tx_bytes:		Net DIM TX byte counter
 * @tx_dim:		Net DIM TX context
 * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
 * @phy_scratch_ring:	physical address of scratch_ring
 * @scratch_head:	The scratch memory that scratch_ring points to.
 * @clks:		clock array for all clocks required
 * @mii_bus:		If there is a bus we need to create an instance for it
 * @pending_work:	The workqueue used to reset the dma ring
 * @state:		Initialization and runtime state of the device
 * @soc:		Holding specific data among various SoCs
 */

struct mtk_eth {
	struct device			*dev;
	struct device			*dma_dev;
	void __iomem			*base;
	spinlock_t			page_lock;
	spinlock_t			tx_irq_lock;
	spinlock_t			rx_irq_lock;
	struct net_device		dummy_dev;
	struct net_device		*netdev[MTK_MAX_DEVS];
	struct mtk_mac			*mac[MTK_MAX_DEVS];
	int				irq[3];
	u32				msg_enable;
	unsigned long			sysclk;
	struct regmap			*ethsys;
	struct regmap			*infra;
	struct phylink_pcs		*sgmii_pcs[MTK_MAX_DEVS];
	struct regmap			*pctl;
	bool				hwlro;
	refcount_t			dma_refcnt;
	struct mtk_tx_ring		tx_ring;
	struct mtk_rx_ring		rx_ring[MTK_MAX_RX_RING_NUM];
	struct mtk_rx_ring		rx_ring_qdma;
	struct napi_struct		tx_napi;
	struct napi_struct		rx_napi;
	void				*scratch_ring;
	dma_addr_t			phy_scratch_ring;
	void				*scratch_head;
	struct clk			*clks[MTK_CLK_MAX];

	struct mii_bus			*mii_bus;
	struct work_struct		pending_work;
	unsigned long			state;

	const struct mtk_soc_data	*soc;

	spinlock_t			dim_lock;

	u32				rx_events;
	u32				rx_packets;
	u32				rx_bytes;
	struct dim			rx_dim;

	u32				tx_events;
	u32				tx_packets;
	u32				tx_bytes;
	struct dim			tx_dim;

	int				ip_align;

	struct metadata_dst		*dsa_meta[MTK_MAX_DSA_PORTS];

	struct mtk_ppe			*ppe[2];
	struct rhashtable		flow_table;

	struct bpf_prog			__rcu *prog;

	struct {
		struct delayed_work monitor_work;
		u32 wdidx;
		u8 wdma_hang_count;
		u8 qdma_hang_count;
		u8 adma_hang_count;
	} reset;
};

/* struct mtk_mac -	the structure that holds the info about the MACs of the
 *			SoC
 * @id:			The number of the MAC
 * @interface:		Interface mode kept for detecting change in hw settings
 * @of_node:		Our devicetree node
 * @hw:			Backpointer to our main data structure
 * @hw_stats:		Packet statistics counter
 */
struct mtk_mac {
	int				id;
	phy_interface_t			interface;
	int				speed;
	struct device_node		*of_node;
	struct phylink			*phylink;
	struct phylink_config		phylink_config;
	struct mtk_eth			*hw;
	struct mtk_hw_stats		*hw_stats;
	__be32				hwlro_ip[MTK_MAX_LRO_IP_CNT];
	int				hwlro_ip_cnt;
	unsigned int			syscfg0;
	struct notifier_block		device_notifier;
};

/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];

static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
{
	return eth->soc->version == 1;
}

static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
{
	return eth->soc->version > 1;
}

static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;

	return ppe->foe_table + hash * soc->foe_entry_size;
}

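/* Usage sketch (illustrative): because the FOE entry layout grew on newer
 * SoCs, entries are addressed by byte offset (hash * foe_entry_size) rather
 * than by indexing an array of a fixed struct; mtk_foe_get_entry() hides
 * that arithmetic from the callers:
 *
 *	struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, hash);
 *	u32 ib1 = READ_ONCE(entry->ib1);
 */
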
static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_TIMESTAMP_V2;

	return MTK_FOE_IB1_BIND_TIMESTAMP;
}

static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_PPPOE_V2;

	return MTK_FOE_IB1_BIND_PPPOE;
}

static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_VLAN_TAG_V2;

	return MTK_FOE_IB1_BIND_VLAN_TAG;
}

static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;

	return MTK_FOE_IB1_BIND_VLAN_LAYER;
}

static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);

	return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
}

static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);

	return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
}

static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_PACKET_TYPE_V2;

	return MTK_FOE_IB1_PACKET_TYPE;
}

static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);

	return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
}

static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB2_MULTICAST_V2;

	return MTK_FOE_IB2_MULTICAST;
}

/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);

int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);

int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data);
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index);
void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);

#endif /* MTK_ETH_H */