1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 *
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7 */
8
9 #ifndef MTK_ETH_H
10 #define MTK_ETH_H
11
12 #include <linux/dma-mapping.h>
13 #include <linux/netdevice.h>
14 #include <linux/of_net.h>
15 #include <linux/u64_stats_sync.h>
16 #include <linux/refcount.h>
17 #include <linux/phylink.h>
18 #include <linux/rhashtable.h>
19 #include <linux/dim.h>
20 #include <linux/bitfield.h>
21 #include <net/page_pool/types.h>
22 #include <linux/bpf_trace.h>
23 #include "mtk_ppe.h"
24
25 #define MTK_MAX_DSA_PORTS 7
26 #define MTK_DSA_PORT_MASK GENMASK(2, 0)
27
28 #define MTK_QDMA_NUM_QUEUES 16
29 #define MTK_QDMA_PAGE_SIZE 2048
30 #define MTK_MAX_RX_LENGTH 1536
31 #define MTK_MAX_RX_LENGTH_2K 2048
32 #define MTK_TX_DMA_BUF_LEN 0x3fff
33 #define MTK_TX_DMA_BUF_LEN_V2 0xffff
34 #define MTK_QDMA_RING_SIZE 2048
35 #define MTK_DMA_SIZE 512
36 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
37 #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
38 #define MTK_DMA_DUMMY_DESC 0xffffffff
39 #define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
40 NETIF_MSG_PROBE | \
41 NETIF_MSG_LINK | \
42 NETIF_MSG_TIMER | \
43 NETIF_MSG_IFDOWN | \
44 NETIF_MSG_IFUP | \
45 NETIF_MSG_RX_ERR | \
46 NETIF_MSG_TX_ERR)
47 #define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
48 NETIF_F_RXCSUM | \
49 NETIF_F_HW_VLAN_CTAG_TX | \
50 NETIF_F_SG | NETIF_F_TSO | \
51 NETIF_F_TSO6 | \
52 NETIF_F_IPV6_CSUM |\
53 NETIF_F_HW_TC)
54 #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
55 #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
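/* Illustrative sketch (not part of the driver): NEXT_DESP_IDX() assumes the
 * ring size is a power of two, so advancing the index wraps with a mask
 * instead of a modulo. The helper name below is hypothetical.
 */
static inline int mtk_example_next_desp_idx(int idx, int ring_size)
{
	/* e.g. with ring_size == 2048, index 2047 wraps back to 0 */
	return NEXT_DESP_IDX(idx, ring_size);
}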
56
57 #define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
58 #define MTK_PP_PAD (MTK_PP_HEADROOM + \
59 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
60 #define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
61
62 #define MTK_QRX_OFFSET 0x10
63
64 #define MTK_MAX_RX_RING_NUM 4
65 #define MTK_HW_LRO_DMA_SIZE 8
66
67 #define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
68 #define MTK_MAX_LRO_IP_CNT 2
69 #define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
70 #define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
71 #define MTK_HW_LRO_AGG_TIME 10 /* 200us */
72 #define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
73 #define MTK_HW_LRO_MAX_AGG_CNT 64
74 #define MTK_HW_LRO_BW_THRE 3000
75 #define MTK_HW_LRO_REPLACE_DELTA 1000
76 #define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
77
78 /* Frame Engine Global Configuration */
79 #define MTK_FE_GLO_CFG(x) (((x) == MTK_GMAC3_ID) ? 0x24 : 0x00)
80 #define MTK_FE_LINK_DOWN_P(x) BIT(((x) + 8) % 16)
81
82 /* Frame Engine Global Reset Register */
83 #define MTK_RST_GL 0x04
84 #define RST_GL_PSE BIT(0)
85
86 /* Frame Engine Interrupt Status Register */
87 #define MTK_INT_STATUS2 0x08
88 #define MTK_FE_INT_ENABLE 0x0c
89 #define MTK_FE_INT_FQ_EMPTY BIT(8)
90 #define MTK_FE_INT_TSO_FAIL BIT(12)
91 #define MTK_FE_INT_TSO_ILLEGAL BIT(13)
92 #define MTK_FE_INT_TSO_ALIGN BIT(14)
93 #define MTK_FE_INT_RFIFO_OV BIT(18)
94 #define MTK_FE_INT_RFIFO_UF BIT(19)
95 #define MTK_GDM1_AF BIT(28)
96 #define MTK_GDM2_AF BIT(29)
97
98 /* PDMA HW LRO Alter Flow Timer Register */
99 #define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
100
101 /* Frame Engine Interrupt Grouping Register */
102 #define MTK_FE_INT_GRP 0x20
103
104 /* CDMQ Ingress Control Register */
105 #define MTK_CDMQ_IG_CTRL 0x1400
106 #define MTK_CDMQ_STAG_EN BIT(0)
107
108 /* CDMQ Egress Control Register */
109 #define MTK_CDMQ_EG_CTRL 0x1404
110
111 /* CDMP Ingress Control Register */
112 #define MTK_CDMP_IG_CTRL 0x400
113 #define MTK_CDMP_STAG_EN BIT(0)
114
115 /* CDMP Egress Control Register */
116 #define MTK_CDMP_EG_CTRL 0x404
117
118 /* GDM Forward Configuration Register */
119 #define MTK_GDMA_FWD_CFG(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
120 0x540 : 0x500 + (_x * 0x1000); })
121 #define MTK_GDMA_SPECIAL_TAG BIT(24)
122 #define MTK_GDMA_ICS_EN BIT(22)
123 #define MTK_GDMA_TCS_EN BIT(21)
124 #define MTK_GDMA_UCS_EN BIT(20)
125 #define MTK_GDMA_STRP_CRC BIT(16)
126 #define MTK_GDMA_TO_PDMA 0x0
127 #define MTK_GDMA_DROP_ALL 0x7777
128
129 /* GDM Egress Control Register */
130 #define MTK_GDMA_EG_CTRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
131 0x544 : 0x504 + (_x * 0x1000); })
132 #define MTK_GDMA_XGDM_SEL BIT(31)
133
134 /* Unicast Filter MAC Address Register - Low */
135 #define MTK_GDMA_MAC_ADRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
136 0x548 : 0x508 + (_x * 0x1000); })
137
138 /* Unicast Filter MAC Address Register - High */
139 #define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
140 0x54C : 0x50C + (_x * 0x1000); })
141
142 /* Internal SRAM offset */
143 #define MTK_ETH_SRAM_OFFSET 0x40000
144
145 /* FE global misc reg */
146 #define MTK_FE_GLO_MISC 0x124
147
148 /* PSE Free Queue Flow Control */
149 #define PSE_FQFC_CFG1 0x100
150 #define PSE_FQFC_CFG2 0x104
151 #define PSE_DROP_CFG 0x108
152 #define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
153
154 /* PSE Last FreeQ Page Request Control */
155 #define PSE_DUMY_REQ 0x10C
156 /* PSE_DUMY_REQ is not a typo; the register really is named this way in
157 * MediaTek's datasheet.
158 */
159 #define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
160 #define DUMMY_PAGE_THR 0x1
161
162 /* PSE Input Queue Reservation Register */
163 #define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
164
165 /* PSE Output Queue Threshold Register */
166 #define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
167
168 /* GDM and CDM Threshold */
169 #define MTK_GDM2_THRES 0x1530
170 #define MTK_CDMW0_THRES 0x164c
171 #define MTK_CDMW1_THRES 0x1650
172 #define MTK_CDME0_THRES 0x1654
173 #define MTK_CDME1_THRES 0x1658
174 #define MTK_CDMM_THRES 0x165c
175
176 /* PDMA HW LRO Control Registers */
177 #define MTK_PDMA_LRO_CTRL_DW0 0x980
178 #define MTK_LRO_EN BIT(0)
179 #define MTK_L3_CKS_UPD_EN BIT(7)
180 #define MTK_L3_CKS_UPD_EN_V2 BIT(19)
181 #define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
182 #define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
183 #define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
184 #define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
185 #define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
186
187 #define MTK_PDMA_LRO_CTRL_DW1 0x984
188 #define MTK_PDMA_LRO_CTRL_DW2 0x988
189 #define MTK_PDMA_LRO_CTRL_DW3 0x98c
190 #define MTK_ADMA_MODE BIT(15)
191 #define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
192
193 #define MTK_RX_DMA_LRO_EN BIT(8)
194 #define MTK_MULTI_EN BIT(10)
195 #define MTK_PDMA_SIZE_8DWORDS (1 << 4)
196
197 /* PDMA Global Configuration Register */
198 #define MTK_PDMA_LRO_SDL 0x3000
199 #define MTK_RX_CFG_SDL_OFFSET 16
200
201 /* PDMA Reset Index Register */
202 #define MTK_PST_DRX_IDX0 BIT(16)
203 #define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
204
205 /* PDMA Delay Interrupt Register */
206 #define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
207 #define MTK_PDMA_DELAY_RX_EN BIT(15)
208 #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
209 #define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0
210
211 #define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16)
212 #define MTK_PDMA_DELAY_TX_EN BIT(31)
213 #define MTK_PDMA_DELAY_TX_PINT_SHIFT 24
214 #define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16
215
216 #define MTK_PDMA_DELAY_PINT_MASK 0x7f
217 #define MTK_PDMA_DELAY_PTIME_MASK 0xff
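/* Illustrative sketch (hypothetical helper, not the driver's code): the RX
 * half of the PDMA delay-interrupt register is built from a pending-packet
 * threshold and a timer value, each clamped to its field width, plus the
 * per-direction enable bit. The TX half mirrors this with the *_TX_* macros.
 */
static inline u32 mtk_example_rx_delay_irq(u32 pkts, u32 time)
{
	/* pkts: packets to coalesce; time: delay in hardware timer units */
	return MTK_PDMA_DELAY_RX_EN |
	       ((pkts & MTK_PDMA_DELAY_PINT_MASK) << MTK_PDMA_DELAY_RX_PINT_SHIFT) |
	       ((time & MTK_PDMA_DELAY_PTIME_MASK) << MTK_PDMA_DELAY_RX_PTIME_SHIFT);
}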
218
219 /* PDMA HW LRO Alter Flow Delta Register */
220 #define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
221
222 /* PDMA HW LRO IP Setting Registers */
223 #define MTK_LRO_RX_RING0_DIP_DW0 0xb04
224 #define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
225 #define MTK_RING_MYIP_VLD BIT(9)
226
227 /* PDMA HW LRO Ring Control Registers */
228 #define MTK_LRO_RX_RING0_CTRL_DW1 0xb28
229 #define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c
230 #define MTK_LRO_RX_RING0_CTRL_DW3 0xb30
231 #define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
232 #define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
233 #define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
234 #define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
235 #define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
236 #define MTK_RING_AUTO_LERAN_MODE (3 << 6)
237 #define MTK_RING_VLD BIT(8)
238 #define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
239 #define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
240 #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
241
242 /* QDMA TX Queue Configuration Registers */
243 #define MTK_QTX_OFFSET 0x10
244 #define QDMA_RES_THRES 4
245
246 /* QDMA Tx Queue Scheduler Configuration Registers */
247 #define MTK_QTX_SCH_TX_SEL BIT(31)
248 #define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30)
249
250 #define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30)
251 #define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28)
252 #define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
253 #define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
254 #define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
255 #define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12)
256 #define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
257 #define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
258 #define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
259
260 /* QDMA TX Scheduler Rate Control Register */
261 #define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
262
263 /* QDMA Global Configuration Register */
264 #define MTK_RX_2B_OFFSET BIT(31)
265 #define MTK_RX_BT_32DWORDS (3 << 11)
266 #define MTK_NDP_CO_PRO BIT(10)
267 #define MTK_TX_WB_DDONE BIT(6)
268 #define MTK_TX_BT_32DWORDS (3 << 4)
269 #define MTK_RX_DMA_BUSY BIT(3)
270 #define MTK_TX_DMA_BUSY BIT(1)
271 #define MTK_RX_DMA_EN BIT(2)
272 #define MTK_TX_DMA_EN BIT(0)
273 #define MTK_DMA_BUSY_TIMEOUT_US 1000000
274
275 /* QDMA V2 Global Configuration Register */
276 #define MTK_CHK_DDONE_EN BIT(28)
277 #define MTK_DMAD_WR_WDONE BIT(26)
278 #define MTK_WCOMP_EN BIT(24)
279 #define MTK_RESV_BUF (0x40 << 16)
280 #define MTK_MUTLI_CNT (0x4 << 12)
281 #define MTK_LEAKY_BUCKET_EN BIT(11)
282
283 /* QDMA Flow Control Register */
284 #define FC_THRES_DROP_MODE BIT(20)
285 #define FC_THRES_DROP_EN (7 << 16)
286 #define FC_THRES_MIN 0x4444
287
288 /* QDMA Interrupt Status Register */
289 #define MTK_RX_DONE_DLY BIT(30)
290 #define MTK_TX_DONE_DLY BIT(28)
291 #define MTK_RX_DONE_INT3 BIT(19)
292 #define MTK_RX_DONE_INT2 BIT(18)
293 #define MTK_RX_DONE_INT1 BIT(17)
294 #define MTK_RX_DONE_INT0 BIT(16)
295 #define MTK_TX_DONE_INT3 BIT(3)
296 #define MTK_TX_DONE_INT2 BIT(2)
297 #define MTK_TX_DONE_INT1 BIT(1)
298 #define MTK_TX_DONE_INT0 BIT(0)
299 #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
300 #define MTK_TX_DONE_INT MTK_TX_DONE_DLY
301
302 #define MTK_RX_DONE_INT_V2 BIT(14)
303
304 #define MTK_CDM_TXFIFO_RDY BIT(7)
305
306 /* QDMA Interrupt grouping registers */
307 #define MTK_RLS_DONE_INT BIT(0)
308
309 /* QDMA TX NUM */
310 #define QID_BITS_V2(x) (((x) & 0x3f) << 16)
311 #define MTK_QDMA_GMAC2_QID 8
312
313 #define MTK_TX_DMA_BUF_SHIFT 8
314
315 /* QDMA V2 descriptor txd6 */
316 #define TX_DMA_INS_VLAN_V2 BIT(16)
317 /* QDMA V2 descriptor txd5 */
318 #define TX_DMA_CHKSUM_V2 (0x7 << 28)
319 #define TX_DMA_TSO_V2 BIT(31)
320
321 #define TX_DMA_SPTAG_V3 BIT(27)
322
323 /* QDMA V2 descriptor txd4 */
324 #define TX_DMA_FPORT_SHIFT_V2 8
325 #define TX_DMA_FPORT_MASK_V2 0xf
326 #define TX_DMA_SWC_V2 BIT(30)
327
328 /* QDMA descriptor txd4 */
329 #define TX_DMA_CHKSUM (0x7 << 29)
330 #define TX_DMA_TSO BIT(28)
331 #define TX_DMA_FPORT_SHIFT 25
332 #define TX_DMA_FPORT_MASK 0x7
333 #define TX_DMA_INS_VLAN BIT(16)
334
335 /* QDMA descriptor txd3 */
336 #define TX_DMA_OWNER_CPU BIT(31)
337 #define TX_DMA_LS0 BIT(30)
338 #define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
339 #define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
340 #define TX_DMA_SWC BIT(14)
341 #define TX_DMA_PQID GENMASK(3, 0)
342 #define TX_DMA_ADDR64_MASK GENMASK(3, 0)
343 #if IS_ENABLED(CONFIG_64BIT)
344 # define TX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(TX_DMA_ADDR64_MASK, (x))) << 32)
345 # define TX_DMA_PREP_ADDR64(x) FIELD_PREP(TX_DMA_ADDR64_MASK, ((x) >> 32))
346 #else
347 # define TX_DMA_GET_ADDR64(x) (0)
348 # define TX_DMA_PREP_ADDR64(x) (0)
349 #endif
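/* Illustrative sketch (hypothetical helper): on MTK_36BIT_DMA SoCs a buffer
 * address wider than 32 bits is split between two descriptor words - the low
 * 32 bits are written as-is (e.g. into txd1), while the bits above 31 are
 * packed into the TX_DMA_ADDR64_MASK field with TX_DMA_PREP_ADDR64(), which
 * compiles to 0 on 32-bit kernels.
 */
static inline void mtk_example_split_txd_addr(dma_addr_t addr, u32 *lo, u32 *hi_bits)
{
	*lo = (u32)addr;			/* bits 31:0 of the DMA address */
	*hi_bits = TX_DMA_PREP_ADDR64(addr);	/* bits above 31, already field-shifted */
}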
350
351 /* PDMA on MT7628 */
352 #define TX_DMA_DONE BIT(31)
353 #define TX_DMA_LS1 BIT(14)
354 #define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
355
356 /* QDMA descriptor rxd2 */
357 #define RX_DMA_DONE BIT(31)
358 #define RX_DMA_LSO BIT(30)
359 #define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
360 #define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
361 #define RX_DMA_VTAG BIT(15)
362 #define RX_DMA_ADDR64_MASK GENMASK(3, 0)
363 #if IS_ENABLED(CONFIG_64BIT)
364 # define RX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(RX_DMA_ADDR64_MASK, (x))) << 32)
365 # define RX_DMA_PREP_ADDR64(x) FIELD_PREP(RX_DMA_ADDR64_MASK, ((x) >> 32))
366 #else
367 # define RX_DMA_GET_ADDR64(x) (0)
368 # define RX_DMA_PREP_ADDR64(x) (0)
369 #endif
370
371 /* QDMA descriptor rxd3 */
372 #define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
373 #define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
374 #define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
375
376 /* QDMA descriptor rxd4 */
377 #define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
378 #define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
379 #define MTK_RXD4_SRC_PORT GENMASK(21, 19)
380 #define MTK_RXD4_ALG GENMASK(31, 22)
381
382 /* QDMA descriptor rxd4 */
383 #define RX_DMA_L4_VALID BIT(24)
384 #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
385 #define RX_DMA_SPECIAL_TAG BIT(22)
386
387 /* PDMA descriptor rxd5 */
388 #define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
389 #define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
390 #define MTK_RXD5_SRC_PORT GENMASK(29, 26)
391
392 #define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
393 #define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
394
395 /* PDMA V2 descriptor rxd3 */
396 #define RX_DMA_VTAG_V2 BIT(0)
397 #define RX_DMA_L4_VALID_V2 BIT(2)
398
399 /* PHY Polling and SMI Master Control registers */
400 #define MTK_PPSC 0x10000
401 #define PPSC_MDC_CFG GENMASK(29, 24)
402 #define PPSC_MDC_TURBO BIT(20)
403 #define MDC_MAX_FREQ 25000000
404 #define MDC_MAX_DIVIDER 63
405
406 /* PHY Indirect Access Control registers */
407 #define MTK_PHY_IAC 0x10004
408 #define PHY_IAC_ACCESS BIT(31)
409 #define PHY_IAC_REG_MASK GENMASK(29, 25)
410 #define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x))
411 #define PHY_IAC_ADDR_MASK GENMASK(24, 20)
412 #define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
413 #define PHY_IAC_CMD_MASK GENMASK(19, 18)
414 #define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0)
415 #define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1)
416 #define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2)
417 #define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3)
418 #define PHY_IAC_START_MASK GENMASK(17, 16)
419 #define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0)
420 #define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1)
421 #define PHY_IAC_DATA_MASK GENMASK(15, 0)
422 #define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x))
423 #define PHY_IAC_TIMEOUT HZ
424
425 #define MTK_MAC_MISC 0x1000c
426 #define MTK_MAC_MISC_V3 0x10010
427 #define MTK_MUX_TO_ESW BIT(0)
428 #define MISC_MDC_TURBO BIT(4)
429
430 /* XMAC status registers */
431 #define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
432 #define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
433 #define MTK_USXGMII_PCS_LINK BIT(8)
434 #define MTK_XGMAC_RX_FC BIT(5)
435 #define MTK_XGMAC_TX_FC BIT(4)
436 #define MTK_USXGMII_PCS_MODE GENMASK(3, 1)
437 #define MTK_XGMAC_LINK_STS BIT(0)
438
439 /* GSW bridge registers */
440 #define MTK_GSW_CFG (0x10080)
441 #define GSWTX_IPG_MASK GENMASK(19, 16)
442 #define GSWTX_IPG_SHIFT 16
443 #define GSWRX_IPG_MASK GENMASK(3, 0)
444 #define GSWRX_IPG_SHIFT 0
445 #define GSW_IPG_11 11
446
447 /* Mac control registers */
448 #define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
449 #define MAC_MCR_MAX_RX_MASK GENMASK(25, 24)
450 #define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24))
451 #define MAC_MCR_MAX_RX_1518 0x0
452 #define MAC_MCR_MAX_RX_1536 0x1
453 #define MAC_MCR_MAX_RX_1552 0x2
454 #define MAC_MCR_MAX_RX_2048 0x3
455 #define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
456 #define MAC_MCR_FORCE_MODE BIT(15)
457 #define MAC_MCR_TX_EN BIT(14)
458 #define MAC_MCR_RX_EN BIT(13)
459 #define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
460 #define MAC_MCR_BACKOFF_EN BIT(9)
461 #define MAC_MCR_BACKPR_EN BIT(8)
462 #define MAC_MCR_FORCE_RX_FC BIT(5)
463 #define MAC_MCR_FORCE_TX_FC BIT(4)
464 #define MAC_MCR_SPEED_1000 BIT(3)
465 #define MAC_MCR_SPEED_100 BIT(2)
466 #define MAC_MCR_FORCE_DPX BIT(1)
467 #define MAC_MCR_FORCE_LINK BIT(0)
468 #define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
469
470 /* Mac status registers */
471 #define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
472 #define MAC_MSR_EEE1G BIT(7)
473 #define MAC_MSR_EEE100M BIT(6)
474 #define MAC_MSR_RX_FC BIT(5)
475 #define MAC_MSR_TX_FC BIT(4)
476 #define MAC_MSR_SPEED_1000 BIT(3)
477 #define MAC_MSR_SPEED_100 BIT(2)
478 #define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
479 #define MAC_MSR_DPX BIT(1)
480 #define MAC_MSR_LINK BIT(0)
481
482 /* TRGMII RXC control register */
483 #define TRGMII_RCK_CTRL 0x10300
484 #define DQSI0(x) ((x << 0) & GENMASK(6, 0))
485 #define DQSI1(x) ((x << 8) & GENMASK(14, 8))
486 #define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
487 #define RXC_RST BIT(31)
488 #define RXC_DQSISEL BIT(30)
489 #define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
490 #define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
491
492 #define NUM_TRGMII_CTRL 5
493
494 /* TRGMII TXC control register */
495 #define TRGMII_TCK_CTRL 0x10340
496 #define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
497 #define TXC_INV BIT(30)
498 #define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
499 #define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
500
501 /* TRGMII TX Drive Strength */
502 #define TRGMII_TD_ODT(i) (0x10354 + 8 * (i))
503 #define TD_DM_DRVP(x) ((x) & 0xf)
504 #define TD_DM_DRVN(x) (((x) & 0xf) << 4)
505
506 /* TRGMII Interface mode register */
507 #define INTF_MODE 0x10390
508 #define TRGMII_INTF_DIS BIT(0)
509 #define TRGMII_MODE BIT(1)
510 #define TRGMII_CENTRAL_ALIGNED BIT(2)
511 #define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
512 #define INTF_MODE_RGMII_10_100 0
513
514 /* GPIO port control registers for GMAC 2 */
515 #define GPIO_OD33_CTRL8 0x4c0
516 #define GPIO_BIAS_CTRL 0xed0
517 #define GPIO_DRV_SEL10 0xf00
518
519 /* ethernet subsystem chip id register */
520 #define ETHSYS_CHIPID0_3 0x0
521 #define ETHSYS_CHIPID4_7 0x4
522 #define MT7623_ETH 7623
523 #define MT7622_ETH 7622
524 #define MT7621_ETH 7621
525
526 /* ethernet system control register */
527 #define ETHSYS_SYSCFG 0x10
528 #define SYSCFG_DRAM_TYPE_DDR2 BIT(4)
529
530 /* ethernet subsystem config register */
531 #define ETHSYS_SYSCFG0 0x14
532 #define SYSCFG0_GE_MASK 0x3
533 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
534 #define SYSCFG0_SGMII_MASK GENMASK(9, 7)
535 #define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
536 #define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
537 #define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
538 #define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
539
540
541 /* ethernet subsystem clock register */
542 #define ETHSYS_CLKCFG0 0x2c
543 #define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
544 #define ETHSYS_TRGMII_MT7621_MASK (BIT(5) | BIT(6))
545 #define ETHSYS_TRGMII_MT7621_APLL BIT(6)
546 #define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
547
548 /* ethernet reset control register */
549 #define ETHSYS_RSTCTRL 0x34
550 #define RSTCTRL_FE BIT(6)
551 #define RSTCTRL_WDMA0 BIT(24)
552 #define RSTCTRL_WDMA1 BIT(25)
553 #define RSTCTRL_WDMA2 BIT(26)
554 #define RSTCTRL_PPE0 BIT(31)
555 #define RSTCTRL_PPE0_V2 BIT(30)
556 #define RSTCTRL_PPE1 BIT(31)
557 #define RSTCTRL_PPE0_V3 BIT(29)
558 #define RSTCTRL_PPE1_V3 BIT(30)
559 #define RSTCTRL_PPE2 BIT(31)
560 #define RSTCTRL_ETH BIT(23)
561
562 /* ethernet reset check idle register */
563 #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
564
565 /* ethernet dma channel agent map */
566 #define ETHSYS_DMA_AG_MAP 0x408
567 #define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
568 #define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
569 #define ETHSYS_DMA_AG_MAP_PPE BIT(2)
570
571 /* Infrasys subsystem config registers */
572 #define INFRA_MISC2 0x70c
573 #define CO_QPHY_SEL BIT(0)
574 #define GEPHY_MAC_SEL BIT(1)
575
576 /* Top misc registers */
577 #define USB_PHY_SWITCH_REG 0x218
578 #define QPHY_SEL_MASK GENMASK(1, 0)
579 #define SGMII_QPHY_SEL 0x2
580
581 /* MT7628/88 specific stuff */
582 #define MT7628_PDMA_OFFSET 0x0800
583 #define MT7628_SDM_OFFSET 0x0c00
584
585 #define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00)
586 #define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04)
587 #define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08)
588 #define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c)
589 #define MT7628_PST_DTX_IDX0 BIT(0)
590
591 #define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
592 #define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
593
594 /* Counter / stat register */
595 #define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
596 #define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
597 #define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
598 #define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
599 #define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
600
601 #define MTK_FE_CDM1_FSM 0x220
602 #define MTK_FE_CDM2_FSM 0x224
603 #define MTK_FE_CDM3_FSM 0x238
604 #define MTK_FE_CDM4_FSM 0x298
605 #define MTK_FE_CDM5_FSM 0x318
606 #define MTK_FE_CDM6_FSM 0x328
607 #define MTK_FE_GDM1_FSM 0x228
608 #define MTK_FE_GDM2_FSM 0x22C
609
610 #define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
611
612 struct mtk_rx_dma {
613 unsigned int rxd1;
614 unsigned int rxd2;
615 unsigned int rxd3;
616 unsigned int rxd4;
617 } __packed __aligned(4);
618
619 struct mtk_rx_dma_v2 {
620 unsigned int rxd1;
621 unsigned int rxd2;
622 unsigned int rxd3;
623 unsigned int rxd4;
624 unsigned int rxd5;
625 unsigned int rxd6;
626 unsigned int rxd7;
627 unsigned int rxd8;
628 } __packed __aligned(4);
629
630 struct mtk_tx_dma {
631 unsigned int txd1;
632 unsigned int txd2;
633 unsigned int txd3;
634 unsigned int txd4;
635 } __packed __aligned(4);
636
637 struct mtk_tx_dma_v2 {
638 unsigned int txd1;
639 unsigned int txd2;
640 unsigned int txd3;
641 unsigned int txd4;
642 unsigned int txd5;
643 unsigned int txd6;
644 unsigned int txd7;
645 unsigned int txd8;
646 } __packed __aligned(4);
647
648 struct mtk_eth;
649 struct mtk_mac;
650
651 struct mtk_xdp_stats {
652 u64 rx_xdp_redirect;
653 u64 rx_xdp_pass;
654 u64 rx_xdp_drop;
655 u64 rx_xdp_tx;
656 u64 rx_xdp_tx_errors;
657 u64 tx_xdp_xmit;
658 u64 tx_xdp_xmit_errors;
659 };
660
661 /* struct mtk_hw_stats - the structure that holds the traffic statistics.
662 * @stats_lock: make sure that stats operations are atomic
663 * @reg_offset: the status register offset of the SoC
664 * @syncp: sync point for consistent reads of the 64-bit counters
665 *
666 * All of the supported SoCs have hardware counters for traffic statistics.
667 * Whenever the status IRQ triggers we can read the latest stats from these
668 * counters and store them in this struct.
669 */
670 struct mtk_hw_stats {
671 u64 tx_bytes;
672 u64 tx_packets;
673 u64 tx_skip;
674 u64 tx_collisions;
675 u64 rx_bytes;
676 u64 rx_packets;
677 u64 rx_overflow;
678 u64 rx_fcs_errors;
679 u64 rx_short_errors;
680 u64 rx_long_errors;
681 u64 rx_checksum_errors;
682 u64 rx_flow_control_packets;
683
684 struct mtk_xdp_stats xdp_stats;
685
686 spinlock_t stats_lock;
687 u32 reg_offset;
688 struct u64_stats_sync syncp;
689 };
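/* Illustrative sketch of the intended reader side: on 32-bit systems the
 * 64-bit counters above must be snapshotted under the syncp sequence so a
 * concurrent update cannot be observed half-written. Hypothetical helper:
 */
static inline u64 mtk_example_read_tx_bytes(const struct mtk_hw_stats *hw_stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		bytes = hw_stats->tx_bytes;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));

	return bytes;
}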
690
691 enum mtk_tx_flags {
692 /* PDMA descriptor can point at 1-2 segments. This enum allows us to
693 * track how memory was allocated so that it can be freed properly.
694 */
695 MTK_TX_FLAGS_SINGLE0 = 0x01,
696 MTK_TX_FLAGS_PAGE0 = 0x02,
697 };
698
699 /* This enum defines the index of each clock in the clks array; the
700 * entries must be kept in this order
701 */
702 enum mtk_clks_map {
703 MTK_CLK_ETHIF,
704 MTK_CLK_SGMIITOP,
705 MTK_CLK_ESW,
706 MTK_CLK_GP0,
707 MTK_CLK_GP1,
708 MTK_CLK_GP2,
709 MTK_CLK_GP3,
710 MTK_CLK_XGP1,
711 MTK_CLK_XGP2,
712 MTK_CLK_XGP3,
713 MTK_CLK_CRYPTO,
714 MTK_CLK_FE,
715 MTK_CLK_TRGPLL,
716 MTK_CLK_SGMII_TX_250M,
717 MTK_CLK_SGMII_RX_250M,
718 MTK_CLK_SGMII_CDR_REF,
719 MTK_CLK_SGMII_CDR_FB,
720 MTK_CLK_SGMII2_TX_250M,
721 MTK_CLK_SGMII2_RX_250M,
722 MTK_CLK_SGMII2_CDR_REF,
723 MTK_CLK_SGMII2_CDR_FB,
724 MTK_CLK_SGMII_CK,
725 MTK_CLK_ETH2PLL,
726 MTK_CLK_WOCPU0,
727 MTK_CLK_WOCPU1,
728 MTK_CLK_NETSYS0,
729 MTK_CLK_NETSYS1,
730 MTK_CLK_ETHWARP_WOCPU2,
731 MTK_CLK_ETHWARP_WOCPU1,
732 MTK_CLK_ETHWARP_WOCPU0,
733 MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
734 MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
735 MTK_CLK_TOP_SGM_0_SEL,
736 MTK_CLK_TOP_SGM_1_SEL,
737 MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
738 MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
739 MTK_CLK_TOP_ETH_GMII_SEL,
740 MTK_CLK_TOP_ETH_REFCK_50M_SEL,
741 MTK_CLK_TOP_ETH_SYS_200M_SEL,
742 MTK_CLK_TOP_ETH_SYS_SEL,
743 MTK_CLK_TOP_ETH_XGMII_SEL,
744 MTK_CLK_TOP_ETH_MII_SEL,
745 MTK_CLK_TOP_NETSYS_SEL,
746 MTK_CLK_TOP_NETSYS_500M_SEL,
747 MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
748 MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
749 MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
750 MTK_CLK_TOP_NETSYS_WARP_SEL,
751 MTK_CLK_MAX
752 };
753
754 #define MT7623_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
755 BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
756 BIT_ULL(MTK_CLK_TRGPLL))
757 #define MT7622_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
758 BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
759 BIT_ULL(MTK_CLK_GP2) | \
760 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
761 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
762 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
763 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
764 BIT_ULL(MTK_CLK_SGMII_CK) | \
765 BIT_ULL(MTK_CLK_ETH2PLL))
766 #define MT7621_CLKS_BITMAP (0)
767 #define MT7628_CLKS_BITMAP (0)
768 #define MT7629_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
769 BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
770 BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
771 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
772 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
773 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
774 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
775 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
776 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
777 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
778 BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
779 BIT_ULL(MTK_CLK_SGMII_CK) | \
780 BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
781 #define MT7981_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
782 BIT_ULL(MTK_CLK_GP1) | \
783 BIT_ULL(MTK_CLK_WOCPU0) | \
784 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
785 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
786 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
787 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
788 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
789 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
790 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
791 BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
792 BIT_ULL(MTK_CLK_SGMII_CK))
793 #define MT7986_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
794 BIT_ULL(MTK_CLK_GP1) | \
795 BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
796 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
797 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
798 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
799 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
800 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
801 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
802 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
803 BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
804 #define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
805 BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
806 BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
807 BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
808 BIT_ULL(MTK_CLK_CRYPTO) | \
809 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
810 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
811 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
812 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
813 BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
814 BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
815 BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
816 BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
817 BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
818 BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
819 BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
820 BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
821 BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
822 BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
823 BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
824 BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
825 BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
826 BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
827 BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
828 BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
829 BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
830 BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
831 BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
832 BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
833 BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))
834
835 enum mtk_dev_state {
836 MTK_HW_INIT,
837 MTK_RESETTING
838 };
839
840 /* PSE Port Definition */
841 enum mtk_pse_port {
842 PSE_ADMA_PORT = 0,
843 PSE_GDM1_PORT,
844 PSE_GDM2_PORT,
845 PSE_PPE0_PORT,
846 PSE_PPE1_PORT,
847 PSE_QDMA_TX_PORT,
848 PSE_QDMA_RX_PORT,
849 PSE_DROP_PORT,
850 PSE_WDMA0_PORT,
851 PSE_WDMA1_PORT,
852 PSE_TDMA_PORT,
853 PSE_NONE_PORT,
854 PSE_PPE2_PORT,
855 PSE_WDMA2_PORT,
856 PSE_EIP197_PORT,
857 PSE_GDM3_PORT,
858 PSE_PORT_MAX
859 };
860
861 /* GMAC Identifier */
862 enum mtk_gmac_id {
863 MTK_GMAC1_ID = 0,
864 MTK_GMAC2_ID,
865 MTK_GMAC3_ID,
866 MTK_GMAC_ID_MAX
867 };
868
869 enum mtk_tx_buf_type {
870 MTK_TYPE_SKB,
871 MTK_TYPE_XDP_TX,
872 MTK_TYPE_XDP_NDO,
873 };
874
875 /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
876 * by the TX descriptors
877 * @data: The pointer to the SKB or XDP frame being sent
878 * @dma_addr0: The base addr of the first segment
879 * @dma_len0: The length of the first segment
880 * @dma_addr1: The base addr of the second segment
881 * @dma_len1: The length of the second segment
882 */
883 struct mtk_tx_buf {
884 enum mtk_tx_buf_type type;
885 void *data;
886
887 u16 mac_id;
888 u16 flags;
889 DEFINE_DMA_UNMAP_ADDR(dma_addr0);
890 DEFINE_DMA_UNMAP_LEN(dma_len0);
891 DEFINE_DMA_UNMAP_ADDR(dma_addr1);
892 DEFINE_DMA_UNMAP_LEN(dma_len1);
893 };
894
895 /* struct mtk_tx_ring - This struct holds info describing a TX ring
896 * @dma: The descriptor ring
897 * @buf: The memory pointed at by the ring
898 * @phys: The DMA address of the descriptor ring
899 * @next_free: Pointer to the next free descriptor
900 * @last_free: Pointer to the last free descriptor
901 * @last_free_ptr: Hardware pointer value of the last free descriptor
902 * @thresh: The minimum number of free descriptors required
903 * @free_count: QDMA uses a linked list. Track how many free descriptors
904 * are present
905 */
906 struct mtk_tx_ring {
907 void *dma;
908 struct mtk_tx_buf *buf;
909 dma_addr_t phys;
910 struct mtk_tx_dma *next_free;
911 struct mtk_tx_dma *last_free;
912 u32 last_free_ptr;
913 u16 thresh;
914 atomic_t free_count;
915 int dma_size;
916 struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
917 dma_addr_t phys_pdma;
918 int cpu_idx;
919 };
920
921 /* PDMA rx ring mode */
922 enum mtk_rx_flags {
923 MTK_RX_FLAGS_NORMAL = 0,
924 MTK_RX_FLAGS_HWLRO,
925 MTK_RX_FLAGS_QDMA,
926 };
927
928 /* struct mtk_rx_ring - This struct holds info describing a RX ring
929 * @dma: The descriptor ring
930 * @data: The memory pointed at by the ring
931 * @phys: The DMA address of the descriptor ring
932 * @frag_size: How big can each fragment be
933 * @buf_size: The size of each packet buffer
934 * @calc_idx: The current head of ring
935 */
936 struct mtk_rx_ring {
937 void *dma;
938 u8 **data;
939 dma_addr_t phys;
940 u16 frag_size;
941 u16 buf_size;
942 u16 dma_size;
943 bool calc_idx_update;
944 u16 calc_idx;
945 u32 crx_idx_reg;
946 /* page_pool */
947 struct page_pool *page_pool;
948 struct xdp_rxq_info xdp_q;
949 };
950
951 enum mkt_eth_capabilities {
952 MTK_RGMII_BIT = 0,
953 MTK_TRGMII_BIT,
954 MTK_SGMII_BIT,
955 MTK_ESW_BIT,
956 MTK_GEPHY_BIT,
957 MTK_MUX_BIT,
958 MTK_INFRA_BIT,
959 MTK_SHARED_SGMII_BIT,
960 MTK_HWLRO_BIT,
961 MTK_SHARED_INT_BIT,
962 MTK_TRGMII_MT7621_CLK_BIT,
963 MTK_QDMA_BIT,
964 MTK_SOC_MT7628_BIT,
965 MTK_RSTCTRL_PPE1_BIT,
966 MTK_RSTCTRL_PPE2_BIT,
967 MTK_U3_COPHY_V2_BIT,
968 MTK_SRAM_BIT,
969 MTK_36BIT_DMA_BIT,
970
971 /* MUX BITS*/
972 MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
973 MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
974 MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
975 MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
976 MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
977
978 /* PATH BITS */
979 MTK_ETH_PATH_GMAC1_RGMII_BIT,
980 MTK_ETH_PATH_GMAC1_TRGMII_BIT,
981 MTK_ETH_PATH_GMAC1_SGMII_BIT,
982 MTK_ETH_PATH_GMAC2_RGMII_BIT,
983 MTK_ETH_PATH_GMAC2_SGMII_BIT,
984 MTK_ETH_PATH_GMAC2_GEPHY_BIT,
985 MTK_ETH_PATH_GDM1_ESW_BIT,
986 };
987
988 /* Supported hardware group on SoCs */
989 #define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
990 #define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
991 #define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
992 #define MTK_ESW BIT_ULL(MTK_ESW_BIT)
993 #define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
994 #define MTK_MUX BIT_ULL(MTK_MUX_BIT)
995 #define MTK_INFRA BIT_ULL(MTK_INFRA_BIT)
996 #define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT)
997 #define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT)
998 #define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT)
999 #define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
1000 #define MTK_QDMA BIT_ULL(MTK_QDMA_BIT)
1001 #define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT)
1002 #define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
1003 #define MTK_RSTCTRL_PPE2 BIT_ULL(MTK_RSTCTRL_PPE2_BIT)
1004 #define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
1005 #define MTK_SRAM BIT_ULL(MTK_SRAM_BIT)
1006 #define MTK_36BIT_DMA BIT_ULL(MTK_36BIT_DMA_BIT)
1007
1008 #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
1009 BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
1010 #define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
1011 BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
1012 #define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
1013 BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
1014 #define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
1015 BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
1016 #define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
1017 BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
1018
1019 /* Supported path present on SoCs */
1020 #define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
1021 #define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
1022 #define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
1023 #define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
1024 #define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
1025 #define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
1026 #define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
1027
1028 #define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
1029 #define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
1030 #define MTK_GMAC1_SGMII (MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
1031 #define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
1032 #define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
1033 #define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
1034 #define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
1035
1036 /* MUXes present on SoCs */
1037 /* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
1038 #define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)
1039
1040 /* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
1041 #define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
1042 (MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)
1043
1044 /* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
1045 #define MTK_MUX_U3_GMAC2_TO_QPHY \
1046 (MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)
1047
1048 /* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
1049 #define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
1050 (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
1051 MTK_SHARED_SGMII)
1052
1053 /* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
1054 #define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
1055 (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
1056
1057 #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
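/* Illustrative sketch (hypothetical helper): capability checks are plain
 * bitmask tests against the per-SoC caps word, e.g. distinguishing QDMA
 * capable SoCs from the PDMA-only MT7628 class.
 */
static inline bool mtk_example_soc_has_qdma(u64 caps)
{
	return MTK_HAS_CAPS(caps, MTK_QDMA);
}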
1058
1059 #define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
1060 MTK_GMAC2_RGMII | MTK_SHARED_INT | \
1061 MTK_TRGMII_MT7621_CLK | MTK_QDMA)
1062
1063 #define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
1064 MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
1065 MTK_MUX_GDM1_TO_GMAC1_ESW | \
1066 MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
1067
1068 #define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
1069 MTK_QDMA)
1070
1071 #define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
1072
1073 #define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
1074 MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
1075 MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
1076 MTK_MUX_U3_GMAC2_TO_QPHY | \
1077 MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
1078
1079 #define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
1080 MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
1081 MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
1082 MTK_RSTCTRL_PPE1 | MTK_SRAM)
1083
1084 #define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
1085 MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
1086 MTK_RSTCTRL_PPE1 | MTK_SRAM)
1087
1088 #define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
1089 MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
1090
1091 struct mtk_tx_dma_desc_info {
1092 dma_addr_t addr;
1093 u32 size;
1094 u16 vlan_tci;
1095 u16 qid;
1096 u8 gso:1;
1097 u8 csum:1;
1098 u8 vlan:1;
1099 u8 first:1;
1100 u8 last:1;
1101 };
1102
1103 struct mtk_reg_map {
1104 u32 tx_irq_mask;
1105 u32 tx_irq_status;
1106 struct {
1107 u32 rx_ptr; /* rx base pointer */
1108 u32 rx_cnt_cfg; /* rx max count configuration */
1109 u32 pcrx_ptr; /* rx cpu pointer */
1110 u32 glo_cfg; /* global configuration */
1111 u32 rst_idx; /* reset index */
1112 u32 delay_irq; /* delay interrupt */
1113 u32 irq_status; /* interrupt status */
1114 u32 irq_mask; /* interrupt mask */
1115 u32 adma_rx_dbg0;
1116 u32 int_grp;
1117 } pdma;
1118 struct {
1119 u32 qtx_cfg; /* tx queue configuration */
1120 u32 qtx_sch; /* tx queue scheduler configuration */
1121 u32 rx_ptr; /* rx base pointer */
1122 u32 rx_cnt_cfg; /* rx max count configuration */
1123 u32 qcrx_ptr; /* rx cpu pointer */
1124 u32 glo_cfg; /* global configuration */
1125 u32 rst_idx; /* reset index */
1126 u32 delay_irq; /* delay interrupt */
1127 u32 fc_th; /* flow control */
1128 u32 int_grp;
1129 u32 hred; /* interrupt mask */
1130 u32 ctx_ptr; /* tx acquire cpu pointer */
1131 u32 dtx_ptr; /* tx acquire dma pointer */
1132 u32 crx_ptr; /* tx release cpu pointer */
1133 u32 drx_ptr; /* tx release dma pointer */
1134 u32 fq_head; /* fq head pointer */
1135 u32 fq_tail; /* fq tail pointer */
1136 u32 fq_count; /* fq free page count */
1137 u32 fq_blen; /* fq free page buffer length */
1138 u32 tx_sch_rate; /* tx scheduler rate control registers */
1139 } qdma;
1140 u32 gdm1_cnt;
1141 u32 gdma_to_ppe;
1142 u32 ppe_base;
1143 u32 wdma_base[2];
1144 u32 pse_iq_sta;
1145 u32 pse_oq_sta;
1146 };
1147
1148 /* struct mtk_soc_data - This is the structure holding all differences
1149 * among various platforms
1150 * @reg_map: SoC register map.
1151 * @ana_rgc3: The offset for register ANA_RGC3 related to
1152 * sgmiisys syscon
1153 * @caps: Flags showing the extra capability for the SoC
1154 * @hw_features: Flags showing HW features
1155 * @required_clks: Flags showing the bitmap for required clocks on
1156 * the target SoC
1157 * @required_pctl: A bool value to show whether the SoC requires
1158 * the extra setup for those pins used by GMAC.
1159 * @hash_offset: Flow table hash offset.
1160 * @version: SoC version.
1161 * @foe_entry_size: FOE table entry size.
1162 * @has_accounting: Bool indicating support for accounting of
1163 * offloaded flows.
1164 * @txd_size: Tx DMA descriptor size.
1165 * @rxd_size: Rx DMA descriptor size.
1166 * @rx_irq_done_mask: Rx irq done register mask.
1167 * @rx_dma_l4_valid: Rx DMA valid register mask.
1168 * @dma_max_len: Max DMA tx/rx buffer length.
1169 * @dma_len_offset: Tx/Rx DMA length field offset.
1170 */
1171 struct mtk_soc_data {
1172 const struct mtk_reg_map *reg_map;
1173 u32 ana_rgc3;
1174 u64 caps;
1175 u64 required_clks;
1176 bool required_pctl;
1177 u8 offload_version;
1178 u8 hash_offset;
1179 u8 version;
1180 u16 foe_entry_size;
1181 netdev_features_t hw_features;
1182 bool has_accounting;
1183 bool disable_pll_modes;
1184 struct {
1185 u32 txd_size;
1186 u32 rxd_size;
1187 u32 rx_irq_done_mask;
1188 u32 rx_dma_l4_valid;
1189 u32 dma_max_len;
1190 u32 dma_len_offset;
1191 } txrx;
1192 };
1193
1194 #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
1195
1196 /* currently no SoC has more than 3 macs */
1197 #define MTK_MAX_DEVS 3
1198
1199 /* struct mtk_eth - This is the main data structure for holding the state
1200 * of the driver
1201 * @dev: The device pointer
1202 * @dma_dev: The device pointer used for dma mapping/alloc
1203 * @base: The mapped register i/o base
1204 * @page_lock: Make sure that register operations are atomic
1205 * @tx_irq_lock: Make sure that IRQ register operations are atomic
1206 * @rx_irq_lock: Make sure that IRQ register operations are atomic
1207 * @dim_lock: Make sure that Net DIM operations are atomic
1208 * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
1209 * dummy for NAPI to work
1210 * @netdev: The netdev instances
1211 * @mac: Each netdev is linked to a physical MAC
1212 * @irq: The IRQ that we are using
1213 * @msg_enable: Ethtool msg level
1214 * @ethsys: The register map pointing at the range used to setup
1215 * MII modes
1216 * @infra: The register map pointing at the range used to setup
1217 * SGMII and GePHY path
1218 * @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
1219 * @pctl: The register map pointing at the range used to setup
1220 * GMAC port drive/slew values
1221 * @dma_refcnt: track how many netdevs are using the DMA engine
1222 * @tx_ring: Pointer to the memory holding info about the TX ring
1223 * @rx_ring: Pointer to the memory holding info about the RX ring
1224 * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
1225 * @tx_napi: The TX NAPI struct
1226 * @rx_napi: The RX NAPI struct
1227 * @rx_events: Net DIM RX event counter
1228 * @rx_packets: Net DIM RX packet counter
1229 * @rx_bytes: Net DIM RX byte counter
1230 * @rx_dim: Net DIM RX context
1231 * @tx_events: Net DIM TX event counter
1232 * @tx_packets: Net DIM TX packet counter
1233 * @tx_bytes: Net DIM TX byte counter
1234 * @tx_dim: Net DIM TX context
1235 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
1236 * @phy_scratch_ring: physical address of scratch_ring
1237 * @scratch_head: The scratch memory that scratch_ring points to.
1238 * @clks: clock array for all clocks required
1239 * @mii_bus: If there is a bus we need to create an instance for it
1240 * @pending_work: The workqueue used to reset the dma ring
1241 * @state: Initialization and runtime state of the device
1242 * @soc: Holding specific data among various SoCs
1243 */
1244
1245 struct mtk_eth {
1246 struct device *dev;
1247 struct device *dma_dev;
1248 void __iomem *base;
1249 void *sram_base;
1250 spinlock_t page_lock;
1251 spinlock_t tx_irq_lock;
1252 spinlock_t rx_irq_lock;
1253 struct net_device dummy_dev;
1254 struct net_device *netdev[MTK_MAX_DEVS];
1255 struct mtk_mac *mac[MTK_MAX_DEVS];
1256 int irq[3];
1257 u32 msg_enable;
1258 unsigned long sysclk;
1259 struct regmap *ethsys;
1260 struct regmap *infra;
1261 struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
1262 struct regmap *pctl;
1263 bool hwlro;
1264 refcount_t dma_refcnt;
1265 struct mtk_tx_ring tx_ring;
1266 struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
1267 struct mtk_rx_ring rx_ring_qdma;
1268 struct napi_struct tx_napi;
1269 struct napi_struct rx_napi;
1270 void *scratch_ring;
1271 dma_addr_t phy_scratch_ring;
1272 void *scratch_head;
1273 struct clk *clks[MTK_CLK_MAX];
1274
1275 struct mii_bus *mii_bus;
1276 struct work_struct pending_work;
1277 unsigned long state;
1278
1279 const struct mtk_soc_data *soc;
1280
1281 spinlock_t dim_lock;
1282
1283 u32 rx_events;
1284 u32 rx_packets;
1285 u32 rx_bytes;
1286 struct dim rx_dim;
1287
1288 u32 tx_events;
1289 u32 tx_packets;
1290 u32 tx_bytes;
1291 struct dim tx_dim;
1292
1293 int ip_align;
1294
1295 struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
1296
1297 struct mtk_ppe *ppe[2];
1298 struct rhashtable flow_table;
1299
1300 struct bpf_prog __rcu *prog;
1301
1302 struct {
1303 struct delayed_work monitor_work;
1304 u32 wdidx;
1305 u8 wdma_hang_count;
1306 u8 qdma_hang_count;
1307 u8 adma_hang_count;
1308 } reset;
1309 };
1310
1311 /* struct mtk_mac - the structure that holds the info about the MACs of the
1312 * SoC
1313 * @id: The number of the MAC
1314 * @interface: Interface mode kept for detecting change in hw settings
1315 * @of_node: Our devicetree node
1316 * @hw: Backpointer to our main data structure
1317 * @hw_stats: Packet statistics counter
1318 */
1319 struct mtk_mac {
1320 int id;
1321 phy_interface_t interface;
1322 int speed;
1323 struct device_node *of_node;
1324 struct phylink *phylink;
1325 struct phylink_config phylink_config;
1326 struct mtk_eth *hw;
1327 struct mtk_hw_stats *hw_stats;
1328 __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
1329 int hwlro_ip_cnt;
1330 unsigned int syscfg0;
1331 struct notifier_block device_notifier;
1332 };
1333
1334 /* the struct describing the SoC. these are declared in the soc_xyz.c files */
1335 extern const struct of_device_id of_mtk_match[];
1336
1337 static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
1338 {
1339 return eth->soc->version == 1;
1340 }
1341
1342 static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
1343 {
1344 return eth->soc->version > 1;
1345 }
1346
1347 static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
1348 {
1349 return eth->soc->version > 2;
1350 }
1351
1352 static inline struct mtk_foe_entry *
1353 mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
1354 {
1355 const struct mtk_soc_data *soc = ppe->eth->soc;
1356
1357 return ppe->foe_table + hash * soc->foe_entry_size;
1358 }
1359
1360 static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
1361 {
1362 if (mtk_is_netsys_v2_or_greater(eth))
1363 return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
1364
1365 return MTK_FOE_IB1_BIND_TIMESTAMP;
1366 }
1367
1368 static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
1369 {
1370 if (mtk_is_netsys_v2_or_greater(eth))
1371 return MTK_FOE_IB1_BIND_PPPOE_V2;
1372
1373 return MTK_FOE_IB1_BIND_PPPOE;
1374 }
1375
1376 static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
1377 {
1378 if (mtk_is_netsys_v2_or_greater(eth))
1379 return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
1380
1381 return MTK_FOE_IB1_BIND_VLAN_TAG;
1382 }
1383
1384 static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
1385 {
1386 if (mtk_is_netsys_v2_or_greater(eth))
1387 return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
1388
1389 return MTK_FOE_IB1_BIND_VLAN_LAYER;
1390 }
1391
1392 static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
1393 {
1394 if (mtk_is_netsys_v2_or_greater(eth))
1395 return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
1396
1397 return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
1398 }
1399
1400 static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
1401 {
1402 if (mtk_is_netsys_v2_or_greater(eth))
1403 return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
1404
1405 return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
1406 }
1407
1408 static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
1409 {
1410 if (mtk_is_netsys_v2_or_greater(eth))
1411 return MTK_FOE_IB1_PACKET_TYPE_V2;
1412
1413 return MTK_FOE_IB1_PACKET_TYPE;
1414 }
1415
1416 static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
1417 {
1418 if (mtk_is_netsys_v2_or_greater(eth))
1419 return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
1420
1421 return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
1422 }
1423
1424 static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
1425 {
1426 if (mtk_is_netsys_v2_or_greater(eth))
1427 return MTK_FOE_IB2_MULTICAST_V2;
1428
1429 return MTK_FOE_IB2_MULTICAST;
1430 }
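/* Illustrative sketch (hypothetical helper): the accessors above hide the
 * NETSYS v1 vs. v2+ field layouts, so PPE code can query FOE entries without
 * caring about the SoC generation.
 */
static inline bool mtk_example_foe_is_ipv4_hnapt(struct mtk_eth *eth,
						 struct mtk_foe_entry *foe)
{
	return mtk_get_ib1_pkt_type(eth, foe->ib1) == MTK_PPE_PKT_TYPE_IPV4_HNAPT;
}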
1431
1432 /* read the hardware status register */
1433 void mtk_stats_update_mac(struct mtk_mac *mac);
1434
1435 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
1436 u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
1437 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
1438
1439 int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
1440 int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
1441 int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
1442
1443 int mtk_eth_offload_init(struct mtk_eth *eth);
1444 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
1445 void *type_data);
1446 int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
1447 int ppe_index);
1448 void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
1449 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
1450
1451
1452 #endif /* MTK_ETH_H */
1453