/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#ifndef MTK_ETH_H
#define MTK_ETH_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/phylink.h>
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
#include <net/page_pool/types.h>
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"

#define MTK_MAX_DSA_PORTS	7
#define MTK_DSA_PORT_MASK	GENMASK(2, 0)

#define MTK_QDMA_NUM_QUEUES	16
#define MTK_QDMA_PAGE_SIZE	2048
#define MTK_MAX_RX_LENGTH	1536
#define MTK_MAX_RX_LENGTH_2K	2048
#define MTK_TX_DMA_BUF_LEN	0x3fff
#define MTK_TX_DMA_BUF_LEN_V2	0xffff
#define MTK_QDMA_RING_SIZE	2048
#define MTK_DMA_SIZE		512
#define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC	0xffffffff
#define MTK_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR)
#define MTK_HW_FEATURES		(NETIF_F_IP_CSUM | \
				 NETIF_F_RXCSUM | \
				 NETIF_F_HW_VLAN_CTAG_TX | \
				 NETIF_F_SG | NETIF_F_TSO | \
				 NETIF_F_TSO6 | \
				 NETIF_F_IPV6_CSUM | \
				 NETIF_F_HW_TC)
#define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))

#define MTK_PP_HEADROOM		XDP_PACKET_HEADROOM
#define MTK_PP_PAD		(MTK_PP_HEADROOM + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MTK_PP_MAX_BUF_SIZE	(PAGE_SIZE - MTK_PP_PAD)

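/* Illustrative layout note (not part of the driver): a page handed out by the
 * page_pool is split into XDP headroom, packet data and a tail
 * skb_shared_info, so by construction
 *
 *	PAGE_SIZE == MTK_PP_HEADROOM + MTK_PP_MAX_BUF_SIZE +
 *		     SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * and a frame written at page_address(page) + MTK_PP_HEADROOM that stays
 * within MTK_PP_MAX_BUF_SIZE can never overlap the shared info area.
 */
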
#define MTK_QRX_OFFSET		0x10

#define MTK_MAX_RX_RING_NUM	4
#define MTK_HW_LRO_DMA_SIZE	8

#define MTK_MAX_LRO_RX_LENGTH		(4096 * 3)
#define MTK_MAX_LRO_IP_CNT		2
#define MTK_HW_LRO_TIMER_UNIT		1	/* 20 us */
#define MTK_HW_LRO_REFRESH_TIME		50000	/* 1 sec. */
#define MTK_HW_LRO_AGG_TIME		10	/* 200us */
#define MTK_HW_LRO_AGE_TIME		50	/* 1ms */
#define MTK_HW_LRO_MAX_AGG_CNT		64
#define MTK_HW_LRO_BW_THRE		3000
#define MTK_HW_LRO_REPLACE_DELTA	1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM	1522

/* Frame Engine Global Configuration */
#define MTK_FE_GLO_CFG(x)	(((x) == MTK_GMAC3_ID) ? 0x24 : 0x00)
#define MTK_FE_LINK_DOWN_P(x)	BIT(((x) + 8) % 16)

/* Frame Engine Global Reset Register */
#define MTK_RST_GL		0x04
#define RST_GL_PSE		BIT(0)

/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2		0x08
#define MTK_FE_INT_ENABLE	0x0c
#define MTK_FE_INT_FQ_EMPTY	BIT(8)
#define MTK_FE_INT_TSO_FAIL	BIT(12)
#define MTK_FE_INT_TSO_ILLEGAL	BIT(13)
#define MTK_FE_INT_TSO_ALIGN	BIT(14)
#define MTK_FE_INT_RFIFO_OV	BIT(18)
#define MTK_FE_INT_RFIFO_UF	BIT(19)
#define MTK_GDM1_AF		BIT(28)
#define MTK_GDM2_AF		BIT(29)

/* PDMA HW LRO Alter Flow Timer Register */
#define MTK_PDMA_LRO_ALT_REFRESH_TIMER	0x1c

/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP		0x20

/* CDMQ Ingress Control Register */
#define MTK_CDMQ_IG_CTRL	0x1400
#define MTK_CDMQ_STAG_EN	BIT(0)

/* CDMQ Egress Control Register */
#define MTK_CDMQ_EG_CTRL	0x1404

/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL	0x400
#define MTK_CDMP_STAG_EN	BIT(0)

/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL	0x404

/* GDM Forwarding Config Register */
#define MTK_GDMA_FWD_CFG(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x540 : 0x500 + (_x * 0x1000); })
#define MTK_GDMA_SPECIAL_TAG	BIT(24)
#define MTK_GDMA_ICS_EN		BIT(22)
#define MTK_GDMA_TCS_EN		BIT(21)
#define MTK_GDMA_UCS_EN		BIT(20)
#define MTK_GDMA_STRP_CRC	BIT(16)
#define MTK_GDMA_TO_PDMA	0x0
#define MTK_GDMA_DROP_ALL	0x7777

/* GDM Egress Control Register */
#define MTK_GDMA_EG_CTRL(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x544 : 0x504 + (_x * 0x1000); })
#define MTK_GDMA_XGDM_SEL	BIT(31)

/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x548 : 0x508 + (_x * 0x1000); })

/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x54C : 0x50C + (_x * 0x1000); })

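/* Illustrative sketch (not part of the driver): a unicast filter address is
 * split across the two registers above, with the first two octets in ADRH and
 * the remaining four in ADRL, roughly as mtk_set_mac_address() programs it:
 *
 *	mtk_w32(eth, (addr[0] << 8) | addr[1], MTK_GDMA_MAC_ADRH(id));
 *	mtk_w32(eth, (addr[2] << 24) | (addr[3] << 16) |
 *		     (addr[4] << 8) | addr[5], MTK_GDMA_MAC_ADRL(id));
 *
 * "addr" and "id" are hypothetical locals for the MAC address bytes and the
 * GMAC id; mtk_w32() is declared at the end of this header.
 */
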
/* Internal SRAM offset */
#define MTK_ETH_SRAM_OFFSET	0x40000

/* FE global misc reg */
#define MTK_FE_GLO_MISC		0x124

/* PSE Free Queue Flow Control */
#define PSE_FQFC_CFG1		0x100
#define PSE_FQFC_CFG2		0x104
#define PSE_DROP_CFG		0x108
#define PSE_PPE0_DROP		0x110

/* PSE Input Queue Reservation Register */
#define PSE_IQ_REV(x)		(0x140 + (((x) - 1) << 2))

/* PSE Output Queue Threshold Register */
#define PSE_OQ_TH(x)		(0x160 + (((x) - 1) << 2))

/* GDM and CDM Threshold */
#define MTK_GDM2_THRES		0x1530
#define MTK_CDMW0_THRES		0x164c
#define MTK_CDMW1_THRES		0x1650
#define MTK_CDME0_THRES		0x1654
#define MTK_CDME1_THRES		0x1658
#define MTK_CDMM_THRES		0x165c

/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0		0x980
#define MTK_LRO_EN			BIT(0)
#define MTK_L3_CKS_UPD_EN		BIT(7)
#define MTK_L3_CKS_UPD_EN_V2		BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE	BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ	(0x7 << 26)
#define MTK_LRO_RING_RELINQUISH_REQ_V2	(0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE	(0x7 << 29)
#define MTK_LRO_RING_RELINQUISH_DONE_V2	(0xf << 28)

#define MTK_PDMA_LRO_CTRL_DW1	0x984
#define MTK_PDMA_LRO_CTRL_DW2	0x988
#define MTK_PDMA_LRO_CTRL_DW3	0x98c
#define MTK_ADMA_MODE		BIT(15)
#define MTK_LRO_MIN_RXD_SDL	(MTK_HW_LRO_SDL_REMAIN_ROOM << 16)

#define MTK_RX_DMA_LRO_EN	BIT(8)
#define MTK_MULTI_EN		BIT(10)
#define MTK_PDMA_SIZE_8DWORDS	(1 << 4)

/* PDMA Global Configuration Register */
#define MTK_PDMA_LRO_SDL	0x3000
#define MTK_RX_CFG_SDL_OFFSET	16

/* PDMA Reset Index Register */
#define MTK_PST_DRX_IDX0	BIT(16)
#define MTK_PST_DRX_IDX_CFG(x)	(MTK_PST_DRX_IDX0 << (x))

/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_RX_MASK		GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN		BIT(15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
#define MTK_PDMA_DELAY_RX_PTIME_SHIFT	0

#define MTK_PDMA_DELAY_TX_MASK		GENMASK(31, 16)
#define MTK_PDMA_DELAY_TX_EN		BIT(31)
#define MTK_PDMA_DELAY_TX_PINT_SHIFT	24
#define MTK_PDMA_DELAY_TX_PTIME_SHIFT	16

#define MTK_PDMA_DELAY_PINT_MASK	0x7f
#define MTK_PDMA_DELAY_PTIME_MASK	0xff

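/* Illustrative sketch (not part of the driver): the RX half of the delay
 * interrupt register packs a packet threshold and a timeout next to the
 * enable bit, roughly as the Net DIM handler composes it:
 *
 *	u32 dim = MTK_PDMA_DELAY_RX_EN |
 *		  ((pkts & MTK_PDMA_DELAY_PINT_MASK) <<
 *		   MTK_PDMA_DELAY_RX_PINT_SHIFT) |
 *		  ((time & MTK_PDMA_DELAY_PTIME_MASK) <<
 *		   MTK_PDMA_DELAY_RX_PTIME_SHIFT);
 *
 * "pkts" and "time" are hypothetical locals; the TX half uses the
 * corresponding MTK_PDMA_DELAY_TX_* definitions in the upper 16 bits.
 */
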
/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA	0xa4c

/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0	0xb04
#define MTK_LRO_DIP_DW0_CFG(x)		(MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
#define MTK_RING_MYIP_VLD		BIT(9)

/* PDMA HW LRO Ring Control Registers */
#define MTK_LRO_RX_RING0_CTRL_DW1	0xb28
#define MTK_LRO_RX_RING0_CTRL_DW2	0xb2c
#define MTK_LRO_RX_RING0_CTRL_DW3	0xb30
#define MTK_LRO_CTRL_DW1_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
#define MTK_LRO_CTRL_DW2_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
#define MTK_LRO_CTRL_DW3_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
#define MTK_RING_AGE_TIME_L		((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
#define MTK_RING_AGE_TIME_H		((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
#define MTK_RING_AUTO_LERAN_MODE	(3 << 6)
#define MTK_RING_VLD			BIT(8)
#define MTK_RING_MAX_AGG_TIME		((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
#define MTK_RING_MAX_AGG_CNT_L		((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
#define MTK_RING_MAX_AGG_CNT_H		((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)

/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_OFFSET		0x10
#define QDMA_RES_THRES		4

/* QDMA Tx Queue Scheduler Configuration Registers */
#define MTK_QTX_SCH_TX_SEL		BIT(31)
#define MTK_QTX_SCH_TX_SEL_V2		GENMASK(31, 30)

#define MTK_QTX_SCH_LEAKY_BUCKET_EN	BIT(30)
#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE	GENMASK(29, 28)
#define MTK_QTX_SCH_MIN_RATE_EN		BIT(27)
#define MTK_QTX_SCH_MIN_RATE_MAN	GENMASK(26, 20)
#define MTK_QTX_SCH_MIN_RATE_EXP	GENMASK(19, 16)
#define MTK_QTX_SCH_MAX_RATE_WEIGHT	GENMASK(15, 12)
#define MTK_QTX_SCH_MAX_RATE_EN		BIT(11)
#define MTK_QTX_SCH_MAX_RATE_MAN	GENMASK(10, 4)
#define MTK_QTX_SCH_MAX_RATE_EXP	GENMASK(3, 0)

/* QDMA TX Scheduler Rate Control Register */
#define MTK_QDMA_TX_SCH_MAX_WFQ	BIT(15)

/* QDMA Global Configuration Register */
#define MTK_RX_2B_OFFSET	BIT(31)
#define MTK_RX_BT_32DWORDS	(3 << 11)
#define MTK_NDP_CO_PRO		BIT(10)
#define MTK_TX_WB_DDONE		BIT(6)
#define MTK_TX_BT_32DWORDS	(3 << 4)
#define MTK_RX_DMA_BUSY		BIT(3)
#define MTK_TX_DMA_BUSY		BIT(1)
#define MTK_RX_DMA_EN		BIT(2)
#define MTK_TX_DMA_EN		BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US	1000000

/* QDMA V2 Global Configuration Register */
#define MTK_CHK_DDONE_EN	BIT(28)
#define MTK_DMAD_WR_WDONE	BIT(26)
#define MTK_WCOMP_EN		BIT(24)
#define MTK_RESV_BUF		(0x40 << 16)
#define MTK_MUTLI_CNT		(0x4 << 12)
#define MTK_LEAKY_BUCKET_EN	BIT(11)

/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE	BIT(20)
#define FC_THRES_DROP_EN	(7 << 16)
#define FC_THRES_MIN		0x4444

/* QDMA Interrupt Status Register */
#define MTK_RX_DONE_DLY		BIT(30)
#define MTK_TX_DONE_DLY		BIT(28)
#define MTK_RX_DONE_INT3	BIT(19)
#define MTK_RX_DONE_INT2	BIT(18)
#define MTK_RX_DONE_INT1	BIT(17)
#define MTK_RX_DONE_INT0	BIT(16)
#define MTK_TX_DONE_INT3	BIT(3)
#define MTK_TX_DONE_INT2	BIT(2)
#define MTK_TX_DONE_INT1	BIT(1)
#define MTK_TX_DONE_INT0	BIT(0)
#define MTK_RX_DONE_INT		MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT		MTK_TX_DONE_DLY

#define MTK_RX_DONE_INT_V2	BIT(14)

#define MTK_CDM_TXFIFO_RDY	BIT(7)

/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT	BIT(0)

/* QDMA TX NUM */
#define QID_BITS_V2(x)		(((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID	8

#define MTK_TX_DMA_BUF_SHIFT	8

/* QDMA V2 descriptor txd6 */
#define TX_DMA_INS_VLAN_V2	BIT(16)
/* QDMA V2 descriptor txd5 */
#define TX_DMA_CHKSUM_V2	(0x7 << 28)
#define TX_DMA_TSO_V2		BIT(31)

#define TX_DMA_SPTAG_V3		BIT(27)

/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2	8
#define TX_DMA_FPORT_MASK_V2	0xf
#define TX_DMA_SWC_V2		BIT(30)

/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM		(0x7 << 29)
#define TX_DMA_TSO		BIT(28)
#define TX_DMA_FPORT_SHIFT	25
#define TX_DMA_FPORT_MASK	0x7
#define TX_DMA_INS_VLAN		BIT(16)

/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU	BIT(31)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_PLEN0(x)		(((x) & eth->soc->txrx.dma_max_len) << \
				 eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(x)		((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC		BIT(14)
#define TX_DMA_PQID		GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK	GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
# define TX_DMA_GET_ADDR64(x)	(((u64)FIELD_GET(TX_DMA_ADDR64_MASK, (x))) << 32)
# define TX_DMA_PREP_ADDR64(x)	FIELD_PREP(TX_DMA_ADDR64_MASK, ((x) >> 32))
#else
# define TX_DMA_GET_ADDR64(x)	(0)
# define TX_DMA_PREP_ADDR64(x)	(0)
#endif

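/* Illustrative sketch (not part of the driver): on SoCs with 36-bit DMA the
 * low 32 address bits of a mapping go into txd1 while TX_DMA_PREP_ADDR64()
 * folds bits 35:32 into the 4-bit field of txd3, roughly:
 *
 *	desc->txd1 = lower_32_bits(info->addr);
 *	desc->txd3 = TX_DMA_PLEN0(info->size) |
 *		     TX_DMA_PREP_ADDR64(info->addr);
 *
 * "desc" and "info" stand in for a v2 TX descriptor and the
 * mtk_tx_dma_desc_info declared later in this header; on 32-bit builds the
 * helper simply expands to 0.
 */
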
/* PDMA on MT7628 */
#define TX_DMA_DONE		BIT(31)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)

/* QDMA descriptor rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->txrx.dma_max_len) << \
				 eth->soc->txrx.dma_len_offset)
#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->txrx.dma_len_offset) & \
				 eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG		BIT(15)
#define RX_DMA_ADDR64_MASK	GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
# define RX_DMA_GET_ADDR64(x)	(((u64)FIELD_GET(RX_DMA_ADDR64_MASK, (x))) << 32)
# define RX_DMA_PREP_ADDR64(x)	FIELD_PREP(RX_DMA_ADDR64_MASK, ((x) >> 32))
#else
# define RX_DMA_GET_ADDR64(x)	(0)
# define RX_DMA_PREP_ADDR64(x)	(0)
#endif

/* QDMA descriptor rxd3 */
#define RX_DMA_VID(x)		((x) & VLAN_VID_MASK)
#define RX_DMA_TCI(x)		((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
#define RX_DMA_VPID(x)		(((x) >> 16) & 0xffff)

/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY	GENMASK(13, 0)
#define MTK_RXD4_PPE_CPU_REASON	GENMASK(18, 14)
#define MTK_RXD4_SRC_PORT	GENMASK(21, 19)
#define MTK_RXD4_ALG		GENMASK(31, 22)

/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID		BIT(24)
#define RX_DMA_L4_VALID_PDMA	BIT(30)		/* when PDMA is used */
#define RX_DMA_SPECIAL_TAG	BIT(22)

/* PDMA descriptor rxd5 */
#define MTK_RXD5_FOE_ENTRY	GENMASK(14, 0)
#define MTK_RXD5_PPE_CPU_REASON	GENMASK(22, 18)
#define MTK_RXD5_SRC_PORT	GENMASK(29, 26)

#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0x7)
#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0xf)

/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2		BIT(0)
#define RX_DMA_L4_VALID_V2	BIT(2)

/* PHY Polling and SMI Master Control registers */
#define MTK_PPSC		0x10000
#define PPSC_MDC_CFG		GENMASK(29, 24)
#define PPSC_MDC_TURBO		BIT(20)
#define MDC_MAX_FREQ		25000000
#define MDC_MAX_DIVIDER		63

/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC		0x10004
#define PHY_IAC_ACCESS		BIT(31)
#define PHY_IAC_REG_MASK	GENMASK(29, 25)
#define PHY_IAC_REG(x)		FIELD_PREP(PHY_IAC_REG_MASK, (x))
#define PHY_IAC_ADDR_MASK	GENMASK(24, 20)
#define PHY_IAC_ADDR(x)		FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
#define PHY_IAC_CMD_MASK	GENMASK(19, 18)
#define PHY_IAC_CMD_C45_ADDR	FIELD_PREP(PHY_IAC_CMD_MASK, 0)
#define PHY_IAC_CMD_WRITE	FIELD_PREP(PHY_IAC_CMD_MASK, 1)
#define PHY_IAC_CMD_C22_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 2)
#define PHY_IAC_CMD_C45_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 3)
#define PHY_IAC_START_MASK	GENMASK(17, 16)
#define PHY_IAC_START_C45	FIELD_PREP(PHY_IAC_START_MASK, 0)
#define PHY_IAC_START_C22	FIELD_PREP(PHY_IAC_START_MASK, 1)
#define PHY_IAC_DATA_MASK	GENMASK(15, 0)
#define PHY_IAC_DATA(x)		FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT		HZ

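/* Illustrative sketch (not part of the driver): a clause-22 MDIO read is
 * started by composing the fields above and writing them to MTK_PHY_IAC,
 * roughly as the MDIO bus ops do:
 *
 *	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C22 |
 *		     PHY_IAC_CMD_C22_READ | PHY_IAC_ADDR(phy) |
 *		     PHY_IAC_REG(reg), MTK_PHY_IAC);
 *
 * after which PHY_IAC_ACCESS is polled until it clears (bounded by
 * PHY_IAC_TIMEOUT) and the result is read back through PHY_IAC_DATA_MASK.
 * "phy" and "reg" are hypothetical locals for the PHY address and register.
 */
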
#define MTK_MAC_MISC		0x1000c
#define MTK_MAC_MISC_V3		0x10010
#define MTK_MUX_TO_ESW		BIT(0)
#define MISC_MDC_TURBO		BIT(4)

/* XMAC status registers */
#define MTK_XGMAC_STS(x)	(((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
#define MTK_XGMAC_FORCE_LINK(x)	(((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
#define MTK_USXGMII_PCS_LINK	BIT(8)
#define MTK_XGMAC_RX_FC		BIT(5)
#define MTK_XGMAC_TX_FC		BIT(4)
#define MTK_USXGMII_PCS_MODE	GENMASK(3, 1)
#define MTK_XGMAC_LINK_STS	BIT(0)

/* GSW bridge registers */
#define MTK_GSW_CFG		(0x10080)
#define GSWTX_IPG_MASK		GENMASK(19, 16)
#define GSWTX_IPG_SHIFT		16
#define GSWRX_IPG_MASK		GENMASK(3, 0)
#define GSWRX_IPG_SHIFT		0
#define GSW_IPG_11		11

/* Mac control registers */
#define MTK_MAC_MCR(x)		(0x10100 + (x * 0x100))
#define MAC_MCR_MAX_RX_MASK	GENMASK(25, 24)
#define MAC_MCR_MAX_RX(_x)	(MAC_MCR_MAX_RX_MASK & ((_x) << 24))
#define MAC_MCR_MAX_RX_1518	0x0
#define MAC_MCR_MAX_RX_1536	0x1
#define MAC_MCR_MAX_RX_1552	0x2
#define MAC_MCR_MAX_RX_2048	0x3
#define MAC_MCR_IPG_CFG		(BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE	BIT(15)
#define MAC_MCR_TX_EN		BIT(14)
#define MAC_MCR_RX_EN		BIT(13)
#define MAC_MCR_RX_FIFO_CLR_DIS	BIT(12)
#define MAC_MCR_BACKOFF_EN	BIT(9)
#define MAC_MCR_BACKPR_EN	BIT(8)
#define MAC_MCR_FORCE_RX_FC	BIT(5)
#define MAC_MCR_FORCE_TX_FC	BIT(4)
#define MAC_MCR_SPEED_1000	BIT(3)
#define MAC_MCR_SPEED_100	BIT(2)
#define MAC_MCR_FORCE_DPX	BIT(1)
#define MAC_MCR_FORCE_LINK	BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN	(MAC_MCR_FORCE_MODE)

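/* Illustrative sketch (not part of the driver): forcing a 1000M/full-duplex
 * link up with both directions enabled combines the MCR bits above, roughly
 * as the phylink mac_link_up callback does:
 *
 *	u32 mcr = mtk_r32(eth, MTK_MAC_MCR(mac->id));
 *
 *	mcr |= MAC_MCR_SPEED_1000 | MAC_MCR_FORCE_DPX |
 *	       MAC_MCR_FORCE_LINK | MAC_MCR_TX_EN | MAC_MCR_RX_EN;
 *	mtk_w32(eth, mcr, MTK_MAC_MCR(mac->id));
 *
 * mtk_r32()/mtk_w32() are declared at the end of this header.
 */
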
/* Mac status registers */
#define MTK_MAC_MSR(x)		(0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G		BIT(7)
#define MAC_MSR_EEE100M		BIT(6)
#define MAC_MSR_RX_FC		BIT(5)
#define MAC_MSR_TX_FC		BIT(4)
#define MAC_MSR_SPEED_1000	BIT(3)
#define MAC_MSR_SPEED_100	BIT(2)
#define MAC_MSR_SPEED_MASK	(MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
#define MAC_MSR_DPX		BIT(1)
#define MAC_MSR_LINK		BIT(0)

/* TRGMII RXC control register */
#define TRGMII_RCK_CTRL		0x10300
#define DQSI0(x)		((x << 0) & GENMASK(6, 0))
#define DQSI1(x)		((x << 8) & GENMASK(14, 8))
#define RXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define RXC_RST			BIT(31)
#define RXC_DQSISEL		BIT(30)
#define RCK_CTRL_RGMII_1000	(RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
#define RCK_CTRL_RGMII_10_100	RXCTL_DMWTLAT(2)

#define NUM_TRGMII_CTRL		5

/* TRGMII TXC control register */
#define TRGMII_TCK_CTRL		0x10340
#define TXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define TXC_INV			BIT(30)
#define TCK_CTRL_RGMII_1000	TXCTL_DMWTLAT(2)
#define TCK_CTRL_RGMII_10_100	(TXC_INV | TXCTL_DMWTLAT(2))

/* TRGMII TX Drive Strength */
#define TRGMII_TD_ODT(i)	(0x10354 + 8 * (i))
#define TD_DM_DRVP(x)		((x) & 0xf)
#define TD_DM_DRVN(x)		(((x) & 0xf) << 4)

/* TRGMII Interface mode register */
#define INTF_MODE		0x10390
#define TRGMII_INTF_DIS		BIT(0)
#define TRGMII_MODE		BIT(1)
#define TRGMII_CENTRAL_ALIGNED	BIT(2)
#define INTF_MODE_RGMII_1000	(TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100	0

/* GPIO port control registers for GMAC 2 */
#define GPIO_OD33_CTRL8		0x4c0
#define GPIO_BIAS_CTRL		0xed0
#define GPIO_DRV_SEL10		0xf00

/* ethernet subsystem chip id register */
#define ETHSYS_CHIPID0_3	0x0
#define ETHSYS_CHIPID4_7	0x4
#define MT7623_ETH		7623
#define MT7622_ETH		7622
#define MT7621_ETH		7621

/* ethernet system control register */
#define ETHSYS_SYSCFG		0x10
#define SYSCFG_DRAM_TYPE_DDR2	BIT(4)

/* ethernet subsystem config register */
#define ETHSYS_SYSCFG0		0x14
#define SYSCFG0_GE_MASK		0x3
#define SYSCFG0_GE_MODE(x, y)	(x << (12 + (y * 2)))
#define SYSCFG0_SGMII_MASK	GENMASK(9, 7)
#define SYSCFG0_SGMII_GMAC1	((2 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2	((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2	BIT(9)
#define SYSCFG0_SGMII_GMAC2_V2	BIT(8)

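/* Illustrative sketch (not part of the driver): the GE mode field is two bits
 * per GMAC starting at bit 12, so switching the interface mode of one MAC
 * goes roughly like this:
 *
 *	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
 *	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
 *	val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
 *	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 *
 * "ge_mode" and "val" are hypothetical locals; eth->ethsys is the syscon
 * regmap declared in struct mtk_eth further down.
 */
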
/* ethernet subsystem clock register */
#define ETHSYS_CLKCFG0			0x2c
#define ETHSYS_TRGMII_CLK_SEL362_5	BIT(11)
#define ETHSYS_TRGMII_MT7621_MASK	(BIT(5) | BIT(6))
#define ETHSYS_TRGMII_MT7621_APLL	BIT(6)
#define ETHSYS_TRGMII_MT7621_DDR_PLL	BIT(5)

/* ethernet reset control register */
#define ETHSYS_RSTCTRL		0x34
#define RSTCTRL_FE		BIT(6)
#define RSTCTRL_WDMA0		BIT(24)
#define RSTCTRL_WDMA1		BIT(25)
#define RSTCTRL_WDMA2		BIT(26)
#define RSTCTRL_PPE0		BIT(31)
#define RSTCTRL_PPE0_V2		BIT(30)
#define RSTCTRL_PPE1		BIT(31)
#define RSTCTRL_PPE0_V3		BIT(29)
#define RSTCTRL_PPE1_V3		BIT(30)
#define RSTCTRL_PPE2		BIT(31)
#define RSTCTRL_ETH		BIT(23)

/* ethernet reset check idle register */
#define ETHSYS_FE_RST_CHK_IDLE_EN	0x28

/* ethernet dma channel agent map */
#define ETHSYS_DMA_AG_MAP	0x408
#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)

/* Infrasys subsystem config registers */
#define INFRA_MISC2		0x70c
#define CO_QPHY_SEL		BIT(0)
#define GEPHY_MAC_SEL		BIT(1)

/* Top misc registers */
#define USB_PHY_SWITCH_REG	0x218
#define QPHY_SEL_MASK		GENMASK(1, 0)
#define SGMII_QPHY_SEL		0x2

/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET	0x0800
#define MT7628_SDM_OFFSET	0x0c00

#define MT7628_TX_BASE_PTR0	(MT7628_PDMA_OFFSET + 0x00)
#define MT7628_TX_MAX_CNT0	(MT7628_PDMA_OFFSET + 0x04)
#define MT7628_TX_CTX_IDX0	(MT7628_PDMA_OFFSET + 0x08)
#define MT7628_TX_DTX_IDX0	(MT7628_PDMA_OFFSET + 0x0c)
#define MT7628_PST_DTX_IDX0	BIT(0)

#define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)

/* Counter / stat register */
#define MT7628_SDM_TPCNT	(MT7628_SDM_OFFSET + 0x100)
#define MT7628_SDM_TBCNT	(MT7628_SDM_OFFSET + 0x104)
#define MT7628_SDM_RPCNT	(MT7628_SDM_OFFSET + 0x108)
#define MT7628_SDM_RBCNT	(MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR	(MT7628_SDM_OFFSET + 0x110)

#define MTK_FE_CDM1_FSM		0x220
#define MTK_FE_CDM2_FSM		0x224
#define MTK_FE_CDM3_FSM		0x238
#define MTK_FE_CDM4_FSM		0x298
#define MTK_FE_CDM5_FSM		0x318
#define MTK_FE_CDM6_FSM		0x328
#define MTK_FE_GDM1_FSM		0x228
#define MTK_FE_GDM2_FSM		0x22C

#define MTK_MAC_FSM(x)		(0x1010C + ((x) * 0x100))

struct mtk_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

struct mtk_rx_dma_v2 {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
	unsigned int rxd5;
	unsigned int rxd6;
	unsigned int rxd7;
	unsigned int rxd8;
} __packed __aligned(4);

struct mtk_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

struct mtk_tx_dma_v2 {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
	unsigned int txd5;
	unsigned int txd6;
	unsigned int txd7;
	unsigned int txd8;
} __packed __aligned(4);

struct mtk_eth;
struct mtk_mac;

struct mtk_xdp_stats {
	u64 rx_xdp_redirect;
	u64 rx_xdp_pass;
	u64 rx_xdp_drop;
	u64 rx_xdp_tx;
	u64 rx_xdp_tx_errors;
	u64 tx_xdp_xmit;
	u64 tx_xdp_xmit_errors;
};

/* struct mtk_hw_stats - the structure that holds the traffic statistics.
 * @stats_lock: make sure that stats operations are atomic
 * @reg_offset: the status register offset of the SoC
 * @syncp: the u64_stats_sync used to read the 64-bit counters consistently
 *
 * All of the supported SoCs have hardware counters for traffic statistics.
 * Whenever the status IRQ triggers we can read the latest stats from these
 * counters and store them in this struct.
 */
struct mtk_hw_stats {
	u64 tx_bytes;
	u64 tx_packets;
	u64 tx_skip;
	u64 tx_collisions;
	u64 rx_bytes;
	u64 rx_packets;
	u64 rx_overflow;
	u64 rx_fcs_errors;
	u64 rx_short_errors;
	u64 rx_long_errors;
	u64 rx_checksum_errors;
	u64 rx_flow_control_packets;

	struct mtk_xdp_stats xdp_stats;

	spinlock_t stats_lock;
	u32 reg_offset;
	struct u64_stats_sync syncp;
};

enum mtk_tx_flags {
	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
	 * track how memory was allocated so that it can be freed properly.
	 */
	MTK_TX_FLAGS_SINGLE0 = 0x01,
	MTK_TX_FLAGS_PAGE0 = 0x02,
};

/* This enum gives every required clock a fixed index into the clks[] array
 * and into the per-SoC required_clks bitmaps below.
 */
enum mtk_clks_map {
	MTK_CLK_ETHIF,
	MTK_CLK_SGMIITOP,
	MTK_CLK_ESW,
	MTK_CLK_GP0,
	MTK_CLK_GP1,
	MTK_CLK_GP2,
	MTK_CLK_GP3,
	MTK_CLK_XGP1,
	MTK_CLK_XGP2,
	MTK_CLK_XGP3,
	MTK_CLK_CRYPTO,
	MTK_CLK_FE,
	MTK_CLK_TRGPLL,
	MTK_CLK_SGMII_TX_250M,
	MTK_CLK_SGMII_RX_250M,
	MTK_CLK_SGMII_CDR_REF,
	MTK_CLK_SGMII_CDR_FB,
	MTK_CLK_SGMII2_TX_250M,
	MTK_CLK_SGMII2_RX_250M,
	MTK_CLK_SGMII2_CDR_REF,
	MTK_CLK_SGMII2_CDR_FB,
	MTK_CLK_SGMII_CK,
	MTK_CLK_ETH2PLL,
	MTK_CLK_WOCPU0,
	MTK_CLK_WOCPU1,
	MTK_CLK_NETSYS0,
	MTK_CLK_NETSYS1,
	MTK_CLK_ETHWARP_WOCPU2,
	MTK_CLK_ETHWARP_WOCPU1,
	MTK_CLK_ETHWARP_WOCPU0,
	MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
	MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
	MTK_CLK_TOP_SGM_0_SEL,
	MTK_CLK_TOP_SGM_1_SEL,
	MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
	MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
	MTK_CLK_TOP_ETH_GMII_SEL,
	MTK_CLK_TOP_ETH_REFCK_50M_SEL,
	MTK_CLK_TOP_ETH_SYS_200M_SEL,
	MTK_CLK_TOP_ETH_SYS_SEL,
	MTK_CLK_TOP_ETH_XGMII_SEL,
	MTK_CLK_TOP_ETH_MII_SEL,
	MTK_CLK_TOP_NETSYS_SEL,
	MTK_CLK_TOP_NETSYS_500M_SEL,
	MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
	MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
	MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
	MTK_CLK_TOP_NETSYS_WARP_SEL,
	MTK_CLK_MAX
};

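/* Illustrative sketch (not part of the driver): the per-SoC *_CLKS_BITMAP
 * masks below are indexed by the enum above, so probe code can walk the
 * clks[] array (see struct mtk_eth further down) roughly as:
 *
 *	for (i = 0; i < MTK_CLK_MAX; i++)
 *		if (eth->soc->required_clks & BIT_ULL(i))
 *			clk_prepare_enable(eth->clks[i]);
 *
 * (error handling omitted; "i" is a hypothetical loop variable)
 */
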
#define MT7623_CLKS_BITMAP	(BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_TRGPLL))
#define MT7622_CLKS_BITMAP	(BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII_CK) | \
				 BIT_ULL(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP	(0)
#define MT7628_CLKS_BITMAP	(0)
#define MT7629_CLKS_BITMAP	(BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII_CK) | \
				 BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
#define MT7981_CLKS_BITMAP	(BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_WOCPU0) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII_CK))
#define MT7986_CLKS_BITMAP	(BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
#define MT7988_CLKS_BITMAP	(BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
				 BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
				 BIT_ULL(MTK_CLK_CRYPTO) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
				 BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
				 BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
				 BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))

enum mtk_dev_state {
	MTK_HW_INIT,
	MTK_RESETTING
};

/* PSE Port Definition */
enum mtk_pse_port {
	PSE_ADMA_PORT = 0,
	PSE_GDM1_PORT,
	PSE_GDM2_PORT,
	PSE_PPE0_PORT,
	PSE_PPE1_PORT,
	PSE_QDMA_TX_PORT,
	PSE_QDMA_RX_PORT,
	PSE_DROP_PORT,
	PSE_WDMA0_PORT,
	PSE_WDMA1_PORT,
	PSE_TDMA_PORT,
	PSE_NONE_PORT,
	PSE_PPE2_PORT,
	PSE_WDMA2_PORT,
	PSE_EIP197_PORT,
	PSE_GDM3_PORT,
	PSE_PORT_MAX
};

/* GMAC Identifier */
enum mtk_gmac_id {
	MTK_GMAC1_ID = 0,
	MTK_GMAC2_ID,
	MTK_GMAC3_ID,
	MTK_GMAC_ID_MAX
};

enum mtk_tx_buf_type {
	MTK_TYPE_SKB,
	MTK_TYPE_XDP_TX,
	MTK_TYPE_XDP_NDO,
};

/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
 * by the TX descriptors
 * @data: The memory pointed at by the descriptor (an SKB or an XDP frame,
 *	  depending on @type)
 * @dma_addr0: The base addr of the first segment
 * @dma_len0: The length of the first segment
 * @dma_addr1: The base addr of the second segment
 * @dma_len1: The length of the second segment
 */
struct mtk_tx_buf {
	enum mtk_tx_buf_type type;
	void *data;

	u16 mac_id;
	u16 flags;
	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
	DEFINE_DMA_UNMAP_LEN(dma_len0);
	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
	DEFINE_DMA_UNMAP_LEN(dma_len1);
};

/* struct mtk_tx_ring - This struct holds info describing a TX ring
 * @dma: The descriptor ring
 * @buf: The memory pointed at by the ring
 * @phys: The physical addr of tx_buf
 * @next_free: Pointer to the next free descriptor
 * @last_free: Pointer to the last free descriptor
 * @last_free_ptr: Hardware pointer value of the last free descriptor
 * @thresh: The threshold of minimum amount of free descriptors
 * @free_count: QDMA uses a linked list. Track how many free descriptors
 *		are present
 */
struct mtk_tx_ring {
	void *dma;
	struct mtk_tx_buf *buf;
	dma_addr_t phys;
	struct mtk_tx_dma *next_free;
	struct mtk_tx_dma *last_free;
	u32 last_free_ptr;
	u16 thresh;
	atomic_t free_count;
	int dma_size;
	struct mtk_tx_dma *dma_pdma;	/* For MT7628/88 PDMA handling */
	dma_addr_t phys_pdma;
	int cpu_idx;
};

/* PDMA rx ring mode */
enum mtk_rx_flags {
	MTK_RX_FLAGS_NORMAL = 0,
	MTK_RX_FLAGS_HWLRO,
	MTK_RX_FLAGS_QDMA,
};

/* struct mtk_rx_ring - This struct holds info describing a RX ring
 * @dma: The descriptor ring
 * @data: The memory pointed at by the ring
 * @phys: The physical addr of rx_buf
 * @frag_size: How big can each fragment be
 * @buf_size: The size of each packet buffer
 * @calc_idx: The current head of ring
 */
struct mtk_rx_ring {
	void *dma;
	u8 **data;
	dma_addr_t phys;
	u16 frag_size;
	u16 buf_size;
	u16 dma_size;
	bool calc_idx_update;
	u16 calc_idx;
	u32 crx_idx_reg;
	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_q;
};

enum mkt_eth_capabilities {
	MTK_RGMII_BIT = 0,
	MTK_TRGMII_BIT,
	MTK_SGMII_BIT,
	MTK_ESW_BIT,
	MTK_GEPHY_BIT,
	MTK_MUX_BIT,
	MTK_INFRA_BIT,
	MTK_SHARED_SGMII_BIT,
	MTK_HWLRO_BIT,
	MTK_SHARED_INT_BIT,
	MTK_TRGMII_MT7621_CLK_BIT,
	MTK_QDMA_BIT,
	MTK_SOC_MT7628_BIT,
	MTK_RSTCTRL_PPE1_BIT,
	MTK_RSTCTRL_PPE2_BIT,
	MTK_U3_COPHY_V2_BIT,
	MTK_SRAM_BIT,
	MTK_36BIT_DMA_BIT,

	/* MUX BITS */
	MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
	MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
	MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
	MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
	MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,

	/* PATH BITS */
	MTK_ETH_PATH_GMAC1_RGMII_BIT,
	MTK_ETH_PATH_GMAC1_TRGMII_BIT,
	MTK_ETH_PATH_GMAC1_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_RGMII_BIT,
	MTK_ETH_PATH_GMAC2_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_GEPHY_BIT,
	MTK_ETH_PATH_GDM1_ESW_BIT,
};

/* Supported hardware group on SoCs */
#define MTK_RGMII		BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII		BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII		BIT_ULL(MTK_SGMII_BIT)
#define MTK_ESW			BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY		BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX			BIT_ULL(MTK_MUX_BIT)
#define MTK_INFRA		BIT_ULL(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII	BIT_ULL(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO		BIT_ULL(MTK_HWLRO_BIT)
#define MTK_SHARED_INT		BIT_ULL(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK	BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA		BIT_ULL(MTK_QDMA_BIT)
#define MTK_SOC_MT7628		BIT_ULL(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1	BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
#define MTK_RSTCTRL_PPE2	BIT_ULL(MTK_RSTCTRL_PPE2_BIT)
#define MTK_U3_COPHY_V2		BIT_ULL(MTK_U3_COPHY_V2_BIT)
#define MTK_SRAM		BIT_ULL(MTK_SRAM_BIT)
#define MTK_36BIT_DMA		BIT_ULL(MTK_36BIT_DMA_BIT)

#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW		\
	BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY	\
	BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY		\
	BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII	\
	BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII	\
	BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)

/* Supported path present on SoCs */
#define MTK_ETH_PATH_GMAC1_RGMII	BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII	BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII	BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII	BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII	BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY	BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW		BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)

#define MTK_GMAC1_RGMII		(MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII	(MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
#define MTK_GMAC1_SGMII		(MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
#define MTK_GMAC2_RGMII		(MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII		(MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY		(MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
#define MTK_GDM1_ESW		(MTK_ETH_PATH_GDM1_ESW | MTK_ESW)

/* MUXes present on SoCs */
/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
#define MTK_MUX_GDM1_TO_GMAC1_ESW	(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)

/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY	\
	(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)

/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
#define MTK_MUX_U3_GMAC2_TO_QPHY	\
	(MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)

/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII	\
	(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
	 MTK_SHARED_SGMII)

/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII	\
	(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)

#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))

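/* Illustrative sketch (not part of the driver): capability tests combine the
 * per-SoC bitmaps below with MTK_HAS_CAPS(), e.g.
 *
 *	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 *		... take the QDMA TX path ...
 *
 * Since MTK_HAS_CAPS() requires *all* bits of its second argument to be set,
 * compound capabilities such as MTK_GMAC1_SGMII (a path bit plus MTK_SGMII)
 * only match when every constituent feature is present.
 */
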
#define MT7621_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
		      MTK_GMAC2_RGMII | MTK_SHARED_INT | \
		      MTK_TRGMII_MT7621_CLK | MTK_QDMA)

#define MT7622_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
		      MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
		      MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)

#define MT7623_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
		      MTK_QDMA)

#define MT7628_CAPS  (MTK_SHARED_INT | MTK_SOC_MT7628)

#define MT7629_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)

#define MT7981_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
		      MTK_RSTCTRL_PPE1 | MTK_SRAM)

#define MT7986_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_RSTCTRL_PPE1 | MTK_SRAM)

#define MT7988_CAPS  (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
		      MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)

struct mtk_tx_dma_desc_info {
	dma_addr_t	addr;
	u32		size;
	u16		vlan_tci;
	u16		qid;
	u8		gso:1;
	u8		csum:1;
	u8		vlan:1;
	u8		first:1;
	u8		last:1;
};

struct mtk_reg_map {
	u32	tx_irq_mask;
	u32	tx_irq_status;
	struct {
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	pcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	irq_status;	/* interrupt status */
		u32	irq_mask;	/* interrupt mask */
		u32	adma_rx_dbg0;
		u32	int_grp;
	} pdma;
	struct {
		u32	qtx_cfg;	/* tx queue configuration */
		u32	qtx_sch;	/* tx queue scheduler configuration */
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	qcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	fc_th;		/* flow control */
		u32	int_grp;
		u32	hred;		/* interrupt mask */
		u32	ctx_ptr;	/* tx acquire cpu pointer */
		u32	dtx_ptr;	/* tx acquire dma pointer */
		u32	crx_ptr;	/* tx release cpu pointer */
		u32	drx_ptr;	/* tx release dma pointer */
		u32	fq_head;	/* fq head pointer */
		u32	fq_tail;	/* fq tail pointer */
		u32	fq_count;	/* fq free page count */
		u32	fq_blen;	/* fq free page buffer length */
		u32	tx_sch_rate;	/* tx scheduler rate control registers */
	} qdma;
	u32	gdm1_cnt;
	u32	gdma_to_ppe;
	u32	ppe_base;
	u32	wdma_base[2];
	u32	pse_iq_sta;
	u32	pse_oq_sta;
};

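/* Illustrative sketch (not part of the driver): per-queue QDMA registers are
 * spaced MTK_QTX_OFFSET bytes apart, so queue "n" of the scheduler block is
 * addressed roughly as:
 *
 *	mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + n * MTK_QTX_OFFSET);
 *
 * "val" and "n" are hypothetical locals; the same pattern applies to
 * qdma.qtx_cfg.
 */
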
/* struct mtk_soc_data - This is the structure holding all differences
 * among various platforms
 * @reg_map		SoC register map.
 * @ana_rgc3:		The offset for register ANA_RGC3 related to
 *			sgmiisys syscon
 * @caps		Flags showing the extra capabilities of the SoC
 * @hw_features		Flags showing the supported HW features
 * @required_clks	Bitmap of the clocks required on the target SoC
 * @required_pctl	A bool value to show whether the SoC requires
 *			the extra setup for those pins used by GMAC.
 * @hash_offset		Flow table hash offset.
 * @version		SoC version.
 * @foe_entry_size	Foe table entry size.
 * @has_accounting	Bool indicating support for accounting of
 *			offloaded flows.
 * @txd_size		Tx DMA descriptor size.
 * @rxd_size		Rx DMA descriptor size.
 * @rx_irq_done_mask	Rx irq done register mask.
 * @rx_dma_l4_valid	Rx DMA valid register mask.
 * @dma_max_len		Max DMA tx/rx buffer length.
 * @dma_len_offset	Tx/Rx DMA length field offset.
 */
struct mtk_soc_data {
	const struct mtk_reg_map *reg_map;
	u32 ana_rgc3;
	u64 caps;
	u64 required_clks;
	bool required_pctl;
	u8 offload_version;
	u8 hash_offset;
	u8 version;
	u16 foe_entry_size;
	netdev_features_t hw_features;
	bool has_accounting;
	bool disable_pll_modes;
	struct {
		u32 txd_size;
		u32 rxd_size;
		u32 rx_irq_done_mask;
		u32 rx_dma_l4_valid;
		u32 dma_max_len;
		u32 dma_len_offset;
	} txrx;
};

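/* Illustrative sketch (not part of the driver): mtk_eth_soc.c provides one
 * mtk_soc_data instance per SoC; for a first-generation (NETSYS v1) part the
 * DMA-related fields would look roughly like:
 *
 *	.txrx = {
 *		.txd_size = sizeof(struct mtk_tx_dma),
 *		.rxd_size = sizeof(struct mtk_rx_dma),
 *		.rx_irq_done_mask = MTK_RX_DONE_INT,
 *		.rx_dma_l4_valid = RX_DMA_L4_VALID,
 *		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 *		.dma_len_offset = 16,
 *	},
 *
 * while NETSYS v2/v3 parts use the larger v2 descriptors together with
 * MTK_TX_DMA_BUF_LEN_V2 and MTK_TX_DMA_BUF_SHIFT.
 */
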
#define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)

/* currently no SoC has more than 3 macs */
#define MTK_MAX_DEVS	3

/* struct mtk_eth - This is the main data structure for holding the state
 * of the driver
 * @dev: The device pointer
 * @dma_dev: The device pointer used for dma mapping/alloc
 * @base: The mapped register i/o base
 * @page_lock: Make sure that register operations are atomic
 * @tx_irq_lock: Make sure that IRQ register operations are atomic
 * @rx_irq_lock: Make sure that IRQ register operations are atomic
 * @dim_lock: Make sure that Net DIM operations are atomic
 * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
 *	       dummy for NAPI to work
 * @netdev: The netdev instances
 * @mac: Each netdev is linked to a physical MAC
 * @irq: The IRQ that we are using
 * @msg_enable: Ethtool msg level
 * @ethsys: The register map pointing at the range used to setup
 *	    MII modes
 * @infra: The register map pointing at the range used to setup
 *	   SGMII and GePHY path
 * @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
 * @pctl: The register map pointing at the range used to setup
 *	  GMAC port drive/slew values
 * @dma_refcnt: track how many netdevs are using the DMA engine
 * @tx_ring: Pointer to the memory holding info about the TX ring
 * @rx_ring: Pointer to the memory holding info about the RX ring
 * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
 * @tx_napi: The TX NAPI struct
 * @rx_napi: The RX NAPI struct
 * @rx_events: Net DIM RX event counter
 * @rx_packets: Net DIM RX packet counter
 * @rx_bytes: Net DIM RX byte counter
 * @rx_dim: Net DIM RX context
 * @tx_events: Net DIM TX event counter
 * @tx_packets: Net DIM TX packet counter
 * @tx_bytes: Net DIM TX byte counter
 * @tx_dim: Net DIM TX context
 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
 * @phy_scratch_ring: physical address of scratch_ring
 * @scratch_head: The scratch memory that scratch_ring points to.
 * @clks: clock array for all clocks required
 * @mii_bus: If there is a bus we need to create an instance for it
 * @pending_work: The workqueue used to reset the dma ring
 * @state: Initialization and runtime state of the device
 * @soc: Holding specific data among various SoCs
 */

struct mtk_eth {
	struct device *dev;
	struct device *dma_dev;
	void __iomem *base;
	void *sram_base;
	spinlock_t page_lock;
	spinlock_t tx_irq_lock;
	spinlock_t rx_irq_lock;
	struct net_device dummy_dev;
	struct net_device *netdev[MTK_MAX_DEVS];
	struct mtk_mac *mac[MTK_MAX_DEVS];
	int irq[3];
	u32 msg_enable;
	unsigned long sysclk;
	struct regmap *ethsys;
	struct regmap *infra;
	struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
	struct regmap *pctl;
	bool hwlro;
	refcount_t dma_refcnt;
	struct mtk_tx_ring tx_ring;
	struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
	struct mtk_rx_ring rx_ring_qdma;
	struct napi_struct tx_napi;
	struct napi_struct rx_napi;
	void *scratch_ring;
	dma_addr_t phy_scratch_ring;
	void *scratch_head;
	struct clk *clks[MTK_CLK_MAX];

	struct mii_bus *mii_bus;
	struct work_struct pending_work;
	unsigned long state;

	const struct mtk_soc_data *soc;

	spinlock_t dim_lock;

	u32 rx_events;
	u32 rx_packets;
	u32 rx_bytes;
	struct dim rx_dim;

	u32 tx_events;
	u32 tx_packets;
	u32 tx_bytes;
	struct dim tx_dim;

	int ip_align;

	struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];

	struct mtk_ppe *ppe[2];
	struct rhashtable flow_table;

	struct bpf_prog __rcu *prog;

	struct {
		struct delayed_work monitor_work;
		u32 wdidx;
		u8 wdma_hang_count;
		u8 qdma_hang_count;
		u8 adma_hang_count;
	} reset;
};

/* struct mtk_mac - the structure that holds the info about the MACs of the
 * SoC
 * @id: The number of the MAC
 * @interface: Interface mode kept for detecting change in hw settings
 * @of_node: Our devicetree node
 * @hw: Backpointer to our main data structure
 * @hw_stats: Packet statistics counter
 */
struct mtk_mac {
	int id;
	phy_interface_t interface;
	int speed;
	struct device_node *of_node;
	struct phylink *phylink;
	struct phylink_config phylink_config;
	struct mtk_eth *hw;
	struct mtk_hw_stats *hw_stats;
	__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
	int hwlro_ip_cnt;
	unsigned int syscfg0;
	struct notifier_block device_notifier;
};

/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];

static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
{
	return eth->soc->version == 1;
}

static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
{
	return eth->soc->version > 1;
}

static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
{
	return eth->soc->version > 2;
}

static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;

	return ppe->foe_table + hash * soc->foe_entry_size;
}

static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_TIMESTAMP_V2;

	return MTK_FOE_IB1_BIND_TIMESTAMP;
}

static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_PPPOE_V2;

	return MTK_FOE_IB1_BIND_PPPOE;
}

static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_VLAN_TAG_V2;

	return MTK_FOE_IB1_BIND_VLAN_TAG;
}

static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;

	return MTK_FOE_IB1_BIND_VLAN_LAYER;
}

static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);

	return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
}

static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);

	return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
}

static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB1_PACKET_TYPE_V2;

	return MTK_FOE_IB1_PACKET_TYPE;
}

static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);

	return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
}

static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
{
	if (mtk_is_netsys_v2_or_greater(eth))
		return MTK_FOE_IB2_MULTICAST_V2;

	return MTK_FOE_IB2_MULTICAST;
}

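/* Illustrative sketch (not part of the driver): the helpers above let PPE
 * code stay NETSYS-version agnostic when rewriting the ib1 word of a FOE
 * entry, e.g.
 *
 *	u32 ib1 = entry->ib1;
 *
 *	ib1 &= ~mtk_get_ib1_vlan_layer_mask(eth);
 *	ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
 *	entry->ib1 = ib1;
 *
 * "entry" is a hypothetical struct mtk_foe_entry pointer such as the one
 * returned by mtk_foe_get_entry().
 */
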
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);

int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);

int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data);
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index);
void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);

#endif /* MTK_ETH_H */