/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#ifndef MTK_ETH_H
#define MTK_ETH_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/phylink.h>
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"

#define MTK_MAX_DSA_PORTS	7
#define MTK_DSA_PORT_MASK	GENMASK(2, 0)

#define MTK_QDMA_NUM_QUEUES	16
#define MTK_QDMA_PAGE_SIZE	2048
#define MTK_MAX_RX_LENGTH	1536
#define MTK_MAX_RX_LENGTH_2K	2048
#define MTK_TX_DMA_BUF_LEN	0x3fff
#define MTK_TX_DMA_BUF_LEN_V2	0xffff
#define MTK_QDMA_RING_SIZE	2048
#define MTK_DMA_SIZE		512
#define MTK_MAC_COUNT		2
#define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC	0xffffffff
#define MTK_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR)
#define MTK_HW_FEATURES		(NETIF_F_IP_CSUM | \
				 NETIF_F_RXCSUM | \
				 NETIF_F_HW_VLAN_CTAG_TX | \
				 NETIF_F_SG | NETIF_F_TSO | \
				 NETIF_F_TSO6 | \
				 NETIF_F_IPV6_CSUM | \
				 NETIF_F_HW_TC)
#define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))

#define MTK_PP_HEADROOM		XDP_PACKET_HEADROOM
#define MTK_PP_PAD		(MTK_PP_HEADROOM + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MTK_PP_MAX_BUF_SIZE	(PAGE_SIZE - MTK_PP_PAD)

#define MTK_QRX_OFFSET		0x10

#define MTK_MAX_RX_RING_NUM	4
#define MTK_HW_LRO_DMA_SIZE	8

#define MTK_MAX_LRO_RX_LENGTH		(4096 * 3)
#define MTK_MAX_LRO_IP_CNT		2
#define MTK_HW_LRO_TIMER_UNIT		1	/* 20 us */
#define MTK_HW_LRO_REFRESH_TIME		50000	/* 1 sec. */
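
/* Note on units: MTK_HW_LRO_TIMER_UNIT above suggests the LRO time values
 * below are expressed in 20 us ticks, which matches the inline comments:
 * an aggregation time of 10 works out to roughly 200 us and an age time
 * of 50 to roughly 1 ms.
 */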
#define MTK_HW_LRO_AGG_TIME		10	/* 200us */
#define MTK_HW_LRO_AGE_TIME		50	/* 1ms */
#define MTK_HW_LRO_MAX_AGG_CNT		64
#define MTK_HW_LRO_BW_THRE		3000
#define MTK_HW_LRO_REPLACE_DELTA	1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM	1522

/* Frame Engine Global Configuration */
#define MTK_FE_GLO_CFG		0x00
#define MTK_FE_LINK_DOWN_P3	BIT(11)
#define MTK_FE_LINK_DOWN_P4	BIT(12)

/* Frame Engine Global Reset Register */
#define MTK_RST_GL		0x04
#define RST_GL_PSE		BIT(0)

/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2		0x08
#define MTK_FE_INT_ENABLE	0x0c
#define MTK_FE_INT_FQ_EMPTY	BIT(8)
#define MTK_FE_INT_TSO_FAIL	BIT(12)
#define MTK_FE_INT_TSO_ILLEGAL	BIT(13)
#define MTK_FE_INT_TSO_ALIGN	BIT(14)
#define MTK_FE_INT_RFIFO_OV	BIT(18)
#define MTK_FE_INT_RFIFO_UF	BIT(19)
#define MTK_GDM1_AF		BIT(28)
#define MTK_GDM2_AF		BIT(29)

/* PDMA HW LRO Alter Flow Timer Register */
#define MTK_PDMA_LRO_ALT_REFRESH_TIMER	0x1c

/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP		0x20

/* CDMQ Ingress Control Register */
#define MTK_CDMQ_IG_CTRL	0x1400
#define MTK_CDMQ_STAG_EN	BIT(0)

/* CDMQ Egress Control Register */
#define MTK_CDMQ_EG_CTRL	0x1404

/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL	0x400
#define MTK_CDMP_STAG_EN	BIT(0)

/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL	0x404

/* GDM Egress Control Register */
#define MTK_GDMA_FWD_CFG(x)	(0x500 + (x * 0x1000))
#define MTK_GDMA_SPECIAL_TAG	BIT(24)
#define MTK_GDMA_ICS_EN		BIT(22)
#define MTK_GDMA_TCS_EN		BIT(21)
#define MTK_GDMA_UCS_EN		BIT(20)
#define MTK_GDMA_TO_PDMA	0x0
#define MTK_GDMA_DROP_ALL	0x7777

/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x)	(0x508 + (x * 0x1000))

/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x)	(0x50C + (x * 0x1000))

/* FE global misc reg */
#define MTK_FE_GLO_MISC		0x124

/* PSE Free Queue Flow Control */
#define PSE_FQFC_CFG1		0x100
#define PSE_FQFC_CFG2		0x104
#define PSE_DROP_CFG		0x108
#define PSE_PPE0_DROP		0x110

/* PSE Input Queue Reservation Register */
#define PSE_IQ_REV(x)		(0x140 + (((x) - 1) << 2))

/* PSE Output Queue Threshold Register */
#define PSE_OQ_TH(x)		(0x160 + (((x) - 1) << 2))

/* GDM and CDM Threshold */
#define MTK_GDM2_THRES		0x1530
#define MTK_CDMW0_THRES		0x164c
#define MTK_CDMW1_THRES		0x1650
#define MTK_CDME0_THRES		0x1654
#define MTK_CDME1_THRES		0x1658
#define MTK_CDMM_THRES		0x165c

/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0		0x980
#define MTK_LRO_EN			BIT(0)
#define MTK_L3_CKS_UPD_EN		BIT(7)
#define MTK_L3_CKS_UPD_EN_V2		BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE	BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ	(0x7 << 26)
#define MTK_LRO_RING_RELINQUISH_REQ_V2	(0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE	(0x7 << 29)
#define MTK_LRO_RING_RELINQUISH_DONE_V2	(0xf << 28)

#define MTK_PDMA_LRO_CTRL_DW1	0x984
#define MTK_PDMA_LRO_CTRL_DW2	0x988
#define MTK_PDMA_LRO_CTRL_DW3	0x98c
#define MTK_ADMA_MODE		BIT(15)
#define MTK_LRO_MIN_RXD_SDL	(MTK_HW_LRO_SDL_REMAIN_ROOM << 16)

#define MTK_RX_DMA_LRO_EN	BIT(8)
#define MTK_MULTI_EN		BIT(10)
#define MTK_PDMA_SIZE_8DWORDS	(1 << 4)

/* PDMA Global Configuration Register */
#define MTK_PDMA_LRO_SDL	0x3000
#define MTK_RX_CFG_SDL_OFFSET	16

/* PDMA Reset Index Register */
#define MTK_PST_DRX_IDX0	BIT(16)
#define MTK_PST_DRX_IDX_CFG(x)	(MTK_PST_DRX_IDX0 << (x))

/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_RX_MASK		GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN		BIT(15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
#define MTK_PDMA_DELAY_RX_PTIME_SHIFT	0

#define MTK_PDMA_DELAY_TX_MASK		GENMASK(31, 16)
#define MTK_PDMA_DELAY_TX_EN		BIT(31)
#define MTK_PDMA_DELAY_TX_PINT_SHIFT	24
#define MTK_PDMA_DELAY_TX_PTIME_SHIFT	16

#define MTK_PDMA_DELAY_PINT_MASK	0x7f
#define MTK_PDMA_DELAY_PTIME_MASK	0xff

/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA	0xa4c

/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0	0xb04
#define MTK_LRO_DIP_DW0_CFG(x)		(MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
#define MTK_RING_MYIP_VLD		BIT(9)

/* PDMA HW LRO Ring Control Registers */
#define MTK_LRO_RX_RING0_CTRL_DW1	0xb28
#define MTK_LRO_RX_RING0_CTRL_DW2	0xb2c
#define MTK_LRO_RX_RING0_CTRL_DW3	0xb30
#define MTK_LRO_CTRL_DW1_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
#define MTK_LRO_CTRL_DW2_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
#define MTK_LRO_CTRL_DW3_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
#define MTK_RING_AGE_TIME_L		((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
#define MTK_RING_AGE_TIME_H		((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
#define MTK_RING_AUTO_LERAN_MODE	(3 << 6)
#define MTK_RING_VLD			BIT(8)
#define MTK_RING_MAX_AGG_TIME		((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
#define MTK_RING_MAX_AGG_CNT_L		((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
#define MTK_RING_MAX_AGG_CNT_H		((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)

/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_OFFSET		0x10
#define QDMA_RES_THRES		4

/* QDMA Tx Queue Scheduler Configuration Registers */
#define MTK_QTX_SCH_TX_SEL		BIT(31)
#define MTK_QTX_SCH_TX_SEL_V2		GENMASK(31, 30)

#define MTK_QTX_SCH_LEAKY_BUCKET_EN	BIT(30)
#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE	GENMASK(29, 28)
#define MTK_QTX_SCH_MIN_RATE_EN		BIT(27)
#define MTK_QTX_SCH_MIN_RATE_MAN	GENMASK(26, 20)
#define MTK_QTX_SCH_MIN_RATE_EXP	GENMASK(19, 16)
#define MTK_QTX_SCH_MAX_RATE_WEIGHT	GENMASK(15, 12)
#define MTK_QTX_SCH_MAX_RATE_EN		BIT(11)
#define MTK_QTX_SCH_MAX_RATE_MAN	GENMASK(10, 4)
#define MTK_QTX_SCH_MAX_RATE_EXP	GENMASK(3, 0)

/* QDMA TX Scheduler Rate Control Register */
#define MTK_QDMA_TX_SCH_MAX_WFQ	BIT(15)

/* QDMA Global Configuration Register */
#define MTK_RX_2B_OFFSET	BIT(31)
#define MTK_RX_BT_32DWORDS	(3 << 11)
#define MTK_NDP_CO_PRO		BIT(10)
#define MTK_TX_WB_DDONE		BIT(6)
#define MTK_TX_BT_32DWORDS	(3 << 4)
#define MTK_RX_DMA_BUSY		BIT(3)
#define MTK_TX_DMA_BUSY		BIT(1)
#define MTK_RX_DMA_EN		BIT(2)
#define MTK_TX_DMA_EN		BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US	1000000

/* QDMA V2 Global Configuration Register */
#define MTK_CHK_DDONE_EN	BIT(28)
#define MTK_DMAD_WR_WDONE	BIT(26)
#define MTK_WCOMP_EN		BIT(24)
#define MTK_RESV_BUF		(0x40 << 16)
#define MTK_MUTLI_CNT		(0x4 << 12)
#define MTK_LEAKY_BUCKET_EN	BIT(11)

/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE	BIT(20)
#define FC_THRES_DROP_EN	(7 << 16)
#define FC_THRES_MIN		0x4444
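
/* Sketch of how the PDMA delay-interrupt fields above compose (illustrative
 * only; pint/ptime are placeholder tuning values, e.g. from net_dim):
 *
 *	u32 dly = MTK_PDMA_DELAY_RX_EN |
 *		  ((pint & MTK_PDMA_DELAY_PINT_MASK) <<
 *		   MTK_PDMA_DELAY_RX_PINT_SHIFT) |
 *		  ((ptime & MTK_PDMA_DELAY_PTIME_MASK) <<
 *		   MTK_PDMA_DELAY_RX_PTIME_SHIFT);
 *
 * i.e. an enable bit plus a pending-packet count and a time threshold per
 * direction; the TX half uses the *_TX_* variants in the upper 16 bits.
 */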

/* QDMA Interrupt Status Register */
#define MTK_RX_DONE_DLY		BIT(30)
#define MTK_TX_DONE_DLY		BIT(28)
#define MTK_RX_DONE_INT3	BIT(19)
#define MTK_RX_DONE_INT2	BIT(18)
#define MTK_RX_DONE_INT1	BIT(17)
#define MTK_RX_DONE_INT0	BIT(16)
#define MTK_TX_DONE_INT3	BIT(3)
#define MTK_TX_DONE_INT2	BIT(2)
#define MTK_TX_DONE_INT1	BIT(1)
#define MTK_TX_DONE_INT0	BIT(0)
#define MTK_RX_DONE_INT		MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT		MTK_TX_DONE_DLY

#define MTK_RX_DONE_INT_V2	BIT(14)

#define MTK_CDM_TXFIFO_RDY	BIT(7)

/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT	BIT(0)

#define MTK_STAT_OFFSET		0x40

/* QDMA TX NUM */
#define QID_BITS_V2(x)		(((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID	8

#define MTK_TX_DMA_BUF_SHIFT	8

/* QDMA V2 descriptor txd6 */
#define TX_DMA_INS_VLAN_V2	BIT(16)
/* QDMA V2 descriptor txd5 */
#define TX_DMA_CHKSUM_V2	(0x7 << 28)
#define TX_DMA_TSO_V2		BIT(31)

/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2	8
#define TX_DMA_FPORT_MASK_V2	0xf
#define TX_DMA_SWC_V2		BIT(30)

/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM		(0x7 << 29)
#define TX_DMA_TSO		BIT(28)
#define TX_DMA_FPORT_SHIFT	25
#define TX_DMA_FPORT_MASK	0x7
#define TX_DMA_INS_VLAN		BIT(16)

/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU	BIT(31)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_PLEN0(x)		(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(x)		((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC		BIT(14)
#define TX_DMA_PQID		GENMASK(3, 0)

/* PDMA on MT7628 */
#define TX_DMA_DONE		BIT(31)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)

/* QDMA descriptor rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG		BIT(15)

/* QDMA descriptor rxd3 */
#define RX_DMA_VID(x)		((x) & VLAN_VID_MASK)
#define RX_DMA_TCI(x)		((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
#define RX_DMA_VPID(x)		(((x) >> 16) & 0xffff)

/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY	GENMASK(13, 0)
#define MTK_RXD4_PPE_CPU_REASON	GENMASK(18, 14)
#define MTK_RXD4_SRC_PORT	GENMASK(21, 19)
#define MTK_RXD4_ALG		GENMASK(31, 22)

/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID		BIT(24)
#define RX_DMA_L4_VALID_PDMA	BIT(30)	/* when PDMA is used */
#define RX_DMA_SPECIAL_TAG	BIT(22)

/* PDMA descriptor rxd5 */
#define MTK_RXD5_FOE_ENTRY	GENMASK(14, 0)
#define MTK_RXD5_PPE_CPU_REASON	GENMASK(22, 18)
#define MTK_RXD5_SRC_PORT	GENMASK(29, 26)

#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0x7)
#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0xf)

/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2		BIT(0)
#define RX_DMA_L4_VALID_V2	BIT(2)
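
/* Illustrative RX descriptor parsing sketch (not quoted driver code; rxd is
 * a placeholder struct mtk_rx_dma pointer, and a local "eth" pointer must be
 * in scope because the PLEN macros reference eth->soc->txrx):
 *
 *	unsigned int pktlen = RX_DMA_GET_PLEN0(rxd->rxd2);
 *	u32 foe_hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd->rxd4);
 *	bool l4_ok = !!(rxd->rxd4 & RX_DMA_L4_VALID);
 *
 * On the v2 descriptor layout the FOE/port metadata sits in rxd5
 * (MTK_RXD5_*) and the L4-valid flag in rxd3 (RX_DMA_L4_VALID_V2).
 */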

/* PHY Polling and SMI Master Control registers */
#define MTK_PPSC		0x10000
#define PPSC_MDC_CFG		GENMASK(29, 24)
#define PPSC_MDC_TURBO		BIT(20)
#define MDC_MAX_FREQ		25000000
#define MDC_MAX_DIVIDER		63

/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC		0x10004
#define PHY_IAC_ACCESS		BIT(31)
#define PHY_IAC_REG_MASK	GENMASK(29, 25)
#define PHY_IAC_REG(x)		FIELD_PREP(PHY_IAC_REG_MASK, (x))
#define PHY_IAC_ADDR_MASK	GENMASK(24, 20)
#define PHY_IAC_ADDR(x)		FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
#define PHY_IAC_CMD_MASK	GENMASK(19, 18)
#define PHY_IAC_CMD_C45_ADDR	FIELD_PREP(PHY_IAC_CMD_MASK, 0)
#define PHY_IAC_CMD_WRITE	FIELD_PREP(PHY_IAC_CMD_MASK, 1)
#define PHY_IAC_CMD_C22_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 2)
#define PHY_IAC_CMD_C45_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 3)
#define PHY_IAC_START_MASK	GENMASK(17, 16)
#define PHY_IAC_START_C45	FIELD_PREP(PHY_IAC_START_MASK, 0)
#define PHY_IAC_START_C22	FIELD_PREP(PHY_IAC_START_MASK, 1)
#define PHY_IAC_DATA_MASK	GENMASK(15, 0)
#define PHY_IAC_DATA(x)		FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT		HZ

#define MTK_MAC_MISC		0x1000c
#define MTK_MUX_TO_ESW		BIT(0)

/* Mac control registers */
#define MTK_MAC_MCR(x)		(0x10100 + (x * 0x100))
#define MAC_MCR_MAX_RX_MASK	GENMASK(25, 24)
#define MAC_MCR_MAX_RX(_x)	(MAC_MCR_MAX_RX_MASK & ((_x) << 24))
#define MAC_MCR_MAX_RX_1518	0x0
#define MAC_MCR_MAX_RX_1536	0x1
#define MAC_MCR_MAX_RX_1552	0x2
#define MAC_MCR_MAX_RX_2048	0x3
#define MAC_MCR_IPG_CFG		(BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE	BIT(15)
#define MAC_MCR_TX_EN		BIT(14)
#define MAC_MCR_RX_EN		BIT(13)
#define MAC_MCR_RX_FIFO_CLR_DIS	BIT(12)
#define MAC_MCR_BACKOFF_EN	BIT(9)
#define MAC_MCR_BACKPR_EN	BIT(8)
#define MAC_MCR_FORCE_RX_FC	BIT(5)
#define MAC_MCR_FORCE_TX_FC	BIT(4)
#define MAC_MCR_SPEED_1000	BIT(3)
#define MAC_MCR_SPEED_100	BIT(2)
#define MAC_MCR_FORCE_DPX	BIT(1)
#define MAC_MCR_FORCE_LINK	BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN	(MAC_MCR_FORCE_MODE)

/* Mac status registers */
#define MTK_MAC_MSR(x)		(0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G		BIT(7)
#define MAC_MSR_EEE100M		BIT(6)
#define MAC_MSR_RX_FC		BIT(5)
#define MAC_MSR_TX_FC		BIT(4)
#define MAC_MSR_SPEED_1000	BIT(3)
#define MAC_MSR_SPEED_100	BIT(2)
#define MAC_MSR_SPEED_MASK	(MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
#define MAC_MSR_DPX		BIT(1)
#define MAC_MSR_LINK		BIT(0)

/* TRGMII RXC control register */
#define TRGMII_RCK_CTRL		0x10300
#define DQSI0(x)		((x << 0) & GENMASK(6, 0))
#define DQSI1(x)		((x << 8) & GENMASK(14, 8))
#define RXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define RXC_RST			BIT(31)
#define RXC_DQSISEL		BIT(30)
#define RCK_CTRL_RGMII_1000	(RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
#define RCK_CTRL_RGMII_10_100	RXCTL_DMWTLAT(2)

#define NUM_TRGMII_CTRL		5

/* TRGMII TXC control register */
#define TRGMII_TCK_CTRL		0x10340
#define TXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define TXC_INV			BIT(30)
#define TCK_CTRL_RGMII_1000	TXCTL_DMWTLAT(2)
#define TCK_CTRL_RGMII_10_100	(TXC_INV | TXCTL_DMWTLAT(2))

/* TRGMII TX Drive Strength */
#define TRGMII_TD_ODT(i)	(0x10354 + 8 * (i))
#define TD_DM_DRVP(x)		((x) & 0xf)
#define TD_DM_DRVN(x)		(((x) & 0xf) << 4)

/* TRGMII Interface mode register */
#define INTF_MODE		0x10390
#define TRGMII_INTF_DIS		BIT(0)
#define TRGMII_MODE		BIT(1)
#define TRGMII_CENTRAL_ALIGNED	BIT(2)
#define INTF_MODE_RGMII_1000	(TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100	0
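
/* Illustrative MDIO access sketch (assumed usage of the PHY_IAC_* fields
 * above, not a quoted implementation; phy_addr/phy_reg are placeholders):
 *
 *	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C22 |
 *		PHY_IAC_CMD_C22_READ | PHY_IAC_ADDR(phy_addr) |
 *		PHY_IAC_REG(phy_reg), MTK_PHY_IAC);
 *
 * followed by polling MTK_PHY_IAC until PHY_IAC_ACCESS clears (bounded by
 * PHY_IAC_TIMEOUT) and masking the result with PHY_IAC_DATA_MASK.
 */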

/* GPIO port control registers for GMAC 2 */
#define GPIO_OD33_CTRL8		0x4c0
#define GPIO_BIAS_CTRL		0xed0
#define GPIO_DRV_SEL10		0xf00

/* ethernet subsystem chip id register */
#define ETHSYS_CHIPID0_3	0x0
#define ETHSYS_CHIPID4_7	0x4
#define MT7623_ETH		7623
#define MT7622_ETH		7622
#define MT7621_ETH		7621

/* ethernet system control register */
#define ETHSYS_SYSCFG		0x10
#define SYSCFG_DRAM_TYPE_DDR2	BIT(4)

/* ethernet subsystem config register */
#define ETHSYS_SYSCFG0		0x14
#define SYSCFG0_GE_MASK		0x3
#define SYSCFG0_GE_MODE(x, y)	(x << (12 + (y * 2)))
#define SYSCFG0_SGMII_MASK	GENMASK(9, 8)
#define SYSCFG0_SGMII_GMAC1	((2 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2	((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2	BIT(9)
#define SYSCFG0_SGMII_GMAC2_V2	BIT(8)

/* ethernet subsystem clock register */
#define ETHSYS_CLKCFG0			0x2c
#define ETHSYS_TRGMII_CLK_SEL362_5	BIT(11)
#define ETHSYS_TRGMII_MT7621_MASK	(BIT(5) | BIT(6))
#define ETHSYS_TRGMII_MT7621_APLL	BIT(6)
#define ETHSYS_TRGMII_MT7621_DDR_PLL	BIT(5)

/* ethernet reset control register */
#define ETHSYS_RSTCTRL		0x34
#define RSTCTRL_FE		BIT(6)
#define RSTCTRL_PPE0		BIT(31)
#define RSTCTRL_PPE0_V2		BIT(30)
#define RSTCTRL_PPE1		BIT(31)
#define RSTCTRL_ETH		BIT(23)

/* ethernet reset check idle register */
#define ETHSYS_FE_RST_CHK_IDLE_EN	0x28

/* ethernet dma channel agent map */
#define ETHSYS_DMA_AG_MAP	0x408
#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)

/* Infrasys subsystem config registers */
#define INFRA_MISC2		0x70c
#define CO_QPHY_SEL		BIT(0)
#define GEPHY_MAC_SEL		BIT(1)

/* Top misc registers */
#define USB_PHY_SWITCH_REG	0x218
#define QPHY_SEL_MASK		GENMASK(1, 0)
#define SGMII_QPHY_SEL		0x2

/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET	0x0800
#define MT7628_SDM_OFFSET	0x0c00

#define MT7628_TX_BASE_PTR0	(MT7628_PDMA_OFFSET + 0x00)
#define MT7628_TX_MAX_CNT0	(MT7628_PDMA_OFFSET + 0x04)
#define MT7628_TX_CTX_IDX0	(MT7628_PDMA_OFFSET + 0x08)
#define MT7628_TX_DTX_IDX0	(MT7628_PDMA_OFFSET + 0x0c)
#define MT7628_PST_DTX_IDX0	BIT(0)

#define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)

/* Counter / stat register */
#define MT7628_SDM_TPCNT	(MT7628_SDM_OFFSET + 0x100)
#define MT7628_SDM_TBCNT	(MT7628_SDM_OFFSET + 0x104)
#define MT7628_SDM_RPCNT	(MT7628_SDM_OFFSET + 0x108)
#define MT7628_SDM_RBCNT	(MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR	(MT7628_SDM_OFFSET + 0x110)

#define MTK_FE_CDM1_FSM		0x220
#define MTK_FE_CDM2_FSM		0x224
#define MTK_FE_CDM3_FSM		0x238
#define MTK_FE_CDM4_FSM		0x298
#define MTK_FE_CDM5_FSM		0x318
#define MTK_FE_CDM6_FSM		0x328
#define MTK_FE_GDM1_FSM		0x228
#define MTK_FE_GDM2_FSM		0x22C

#define MTK_MAC_FSM(x)		(0x1010C + ((x) * 0x100))
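
/* The descriptor layouts below come in a 4-word (16-byte) v1 format and an
 * 8-word (32-byte) v2 format. A hedged sketch of how such rings are walked
 * generically (ring/i are placeholders):
 *
 *	void *rxd = ring->dma + i * eth->soc->txrx.rxd_size;
 *
 * so common code can step through either layout using the per-SoC
 * txd_size/rxd_size values from struct mtk_soc_data, declared further down.
 */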

struct mtk_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

struct mtk_rx_dma_v2 {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
	unsigned int rxd5;
	unsigned int rxd6;
	unsigned int rxd7;
	unsigned int rxd8;
} __packed __aligned(4);

struct mtk_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

struct mtk_tx_dma_v2 {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
	unsigned int txd5;
	unsigned int txd6;
	unsigned int txd7;
	unsigned int txd8;
} __packed __aligned(4);

struct mtk_eth;
struct mtk_mac;

struct mtk_xdp_stats {
	u64 rx_xdp_redirect;
	u64 rx_xdp_pass;
	u64 rx_xdp_drop;
	u64 rx_xdp_tx;
	u64 rx_xdp_tx_errors;
	u64 tx_xdp_xmit;
	u64 tx_xdp_xmit_errors;
};

/* struct mtk_hw_stats - the structure that holds the traffic statistics.
 * @stats_lock:	make sure that stats operations are atomic
 * @reg_offset:	the status register offset of the SoC
 * @syncp:	the u64_stats_sync sequence counter used to snapshot the
 *		64-bit counters consistently
 *
 * All of the supported SoCs have hardware counters for traffic statistics.
 * Whenever the status IRQ triggers we can read the latest stats from these
 * counters and store them in this struct.
 */
struct mtk_hw_stats {
	u64 tx_bytes;
	u64 tx_packets;
	u64 tx_skip;
	u64 tx_collisions;
	u64 rx_bytes;
	u64 rx_packets;
	u64 rx_overflow;
	u64 rx_fcs_errors;
	u64 rx_short_errors;
	u64 rx_long_errors;
	u64 rx_checksum_errors;
	u64 rx_flow_control_packets;

	struct mtk_xdp_stats xdp_stats;

	spinlock_t stats_lock;
	u32 reg_offset;
	struct u64_stats_sync syncp;
};
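
/* Illustrative reader sketch (standard u64_stats_sync pattern, not quoted
 * from this driver): the 64-bit counters in mtk_hw_stats are meant to be
 * snapshotted under the sync point, e.g.
 *
 *	unsigned int start;
 *	do {
 *		start = u64_stats_fetch_begin(&hw_stats->syncp);
 *		packets = hw_stats->rx_packets;
 *		bytes = hw_stats->rx_bytes;
 *	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
 */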

enum mtk_tx_flags {
	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
	 * track how memory was allocated so that it can be freed properly.
	 */
	MTK_TX_FLAGS_SINGLE0	= 0x01,
	MTK_TX_FLAGS_PAGE0	= 0x02,

	/* MTK_TX_FLAGS_FPORTx allows tracking which port the SKB was
	 * transmitted out of, instead of looking it up in the hardware
	 * TX descriptor.
	 */
	MTK_TX_FLAGS_FPORT0	= 0x04,
	MTK_TX_FLAGS_FPORT1	= 0x08,
};

/* This enum identifies the clocks by the order in which they are stored in
 * the clock array.
 */
enum mtk_clks_map {
	MTK_CLK_ETHIF,
	MTK_CLK_SGMIITOP,
	MTK_CLK_ESW,
	MTK_CLK_GP0,
	MTK_CLK_GP1,
	MTK_CLK_GP2,
	MTK_CLK_FE,
	MTK_CLK_TRGPLL,
	MTK_CLK_SGMII_TX_250M,
	MTK_CLK_SGMII_RX_250M,
	MTK_CLK_SGMII_CDR_REF,
	MTK_CLK_SGMII_CDR_FB,
	MTK_CLK_SGMII2_TX_250M,
	MTK_CLK_SGMII2_RX_250M,
	MTK_CLK_SGMII2_CDR_REF,
	MTK_CLK_SGMII2_CDR_FB,
	MTK_CLK_SGMII_CK,
	MTK_CLK_ETH2PLL,
	MTK_CLK_WOCPU0,
	MTK_CLK_WOCPU1,
	MTK_CLK_NETSYS0,
	MTK_CLK_NETSYS1,
	MTK_CLK_MAX
};

#define MT7623_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
				 BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
				 BIT(MTK_CLK_TRGPLL))
#define MT7622_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
				 BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_GP2) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII_CK) | \
				 BIT(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP	(0)
#define MT7628_CLKS_BITMAP	(0)
#define MT7629_CLKS_BITMAP	(BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
				 BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII2_TX_250M) | \
				 BIT(MTK_CLK_SGMII2_RX_250M) | \
				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT(MTK_CLK_SGMII_CK) | \
				 BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
#define MT7981_CLKS_BITMAP	(BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_WOCPU0) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII2_TX_250M) | \
				 BIT(MTK_CLK_SGMII2_RX_250M) | \
				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT(MTK_CLK_SGMII_CK))
#define MT7986_CLKS_BITMAP	(BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
				 BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
				 BIT(MTK_CLK_SGMII_TX_250M) | \
				 BIT(MTK_CLK_SGMII_RX_250M) | \
				 BIT(MTK_CLK_SGMII_CDR_REF) | \
				 BIT(MTK_CLK_SGMII_CDR_FB) | \
				 BIT(MTK_CLK_SGMII2_TX_250M) | \
				 BIT(MTK_CLK_SGMII2_RX_250M) | \
				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT(MTK_CLK_SGMII2_CDR_FB))

enum mtk_dev_state {
	MTK_HW_INIT,
	MTK_RESETTING
};

enum mtk_tx_buf_type {
	MTK_TYPE_SKB,
	MTK_TYPE_XDP_TX,
	MTK_TYPE_XDP_NDO,
};

/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed
 * at by the TX descriptors
 * @skb:	The SKB pointer of the packet being sent
 * @dma_addr0:	The base addr of the first segment
 * @dma_len0:	The length of the first segment
 * @dma_addr1:	The base addr of the second segment
 * @dma_len1:	The length of the second segment
 */
struct mtk_tx_buf {
	enum mtk_tx_buf_type type;
	void *data;

	u32 flags;
	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
	DEFINE_DMA_UNMAP_LEN(dma_len0);
	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
	DEFINE_DMA_UNMAP_LEN(dma_len1);
};
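
/* Illustrative sketch of the dma-mapping helpers that pair with the
 * DEFINE_DMA_UNMAP_* fields above (generic kernel API usage, not quoted
 * driver code):
 *
 *	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 *	dma_unmap_len_set(tx_buf, dma_len0, len);
 *	...
 *	dma_unmap_single(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr0),
 *			 dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE);
 *
 * so the unmap bookkeeping compiles away on configs that do not need it.
 */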

/* struct mtk_tx_ring - This struct holds info describing a TX ring
 * @dma:		The descriptor ring
 * @buf:		The memory pointed at by the ring
 * @phys:		The physical addr of tx_buf
 * @next_free:		Pointer to the next free descriptor
 * @last_free:		Pointer to the last free descriptor
 * @last_free_ptr:	Hardware pointer value of the last free descriptor
 * @thresh:		The threshold of minimum amount of free descriptors
 * @free_count:		QDMA uses a linked list. Track how many free
 *			descriptors are present
 */
struct mtk_tx_ring {
	void *dma;
	struct mtk_tx_buf *buf;
	dma_addr_t phys;
	struct mtk_tx_dma *next_free;
	struct mtk_tx_dma *last_free;
	u32 last_free_ptr;
	u16 thresh;
	atomic_t free_count;
	int dma_size;
	struct mtk_tx_dma *dma_pdma;	/* For MT7628/88 PDMA handling */
	dma_addr_t phys_pdma;
	int cpu_idx;
};

/* PDMA rx ring mode */
enum mtk_rx_flags {
	MTK_RX_FLAGS_NORMAL = 0,
	MTK_RX_FLAGS_HWLRO,
	MTK_RX_FLAGS_QDMA,
};

/* struct mtk_rx_ring - This struct holds info describing a RX ring
 * @dma:		The descriptor ring
 * @data:		The memory pointed at by the ring
 * @phys:		The physical addr of rx_buf
 * @frag_size:		How big can each fragment be
 * @buf_size:		The size of each packet buffer
 * @calc_idx:		The current head of ring
 */
struct mtk_rx_ring {
	void *dma;
	u8 **data;
	dma_addr_t phys;
	u16 frag_size;
	u16 buf_size;
	u16 dma_size;
	bool calc_idx_update;
	u16 calc_idx;
	u32 crx_idx_reg;
	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_q;
};

enum mkt_eth_capabilities {
	MTK_RGMII_BIT = 0,
	MTK_TRGMII_BIT,
	MTK_SGMII_BIT,
	MTK_ESW_BIT,
	MTK_GEPHY_BIT,
	MTK_MUX_BIT,
	MTK_INFRA_BIT,
	MTK_SHARED_SGMII_BIT,
	MTK_HWLRO_BIT,
	MTK_SHARED_INT_BIT,
	MTK_TRGMII_MT7621_CLK_BIT,
	MTK_QDMA_BIT,
	MTK_NETSYS_V2_BIT,
	MTK_SOC_MT7628_BIT,
	MTK_RSTCTRL_PPE1_BIT,
	MTK_U3_COPHY_V2_BIT,

	/* MUX BITS */
	MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
	MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
	MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
	MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
	MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,

	/* PATH BITS */
	MTK_ETH_PATH_GMAC1_RGMII_BIT,
	MTK_ETH_PATH_GMAC1_TRGMII_BIT,
	MTK_ETH_PATH_GMAC1_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_RGMII_BIT,
	MTK_ETH_PATH_GMAC2_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_GEPHY_BIT,
	MTK_ETH_PATH_GDM1_ESW_BIT,
};

/* Supported hardware group on SoCs */
#define MTK_RGMII		BIT(MTK_RGMII_BIT)
#define MTK_TRGMII		BIT(MTK_TRGMII_BIT)
#define MTK_SGMII		BIT(MTK_SGMII_BIT)
#define MTK_ESW			BIT(MTK_ESW_BIT)
#define MTK_GEPHY		BIT(MTK_GEPHY_BIT)
#define MTK_MUX			BIT(MTK_MUX_BIT)
#define MTK_INFRA		BIT(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII	BIT(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO		BIT(MTK_HWLRO_BIT)
#define MTK_SHARED_INT		BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK	BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA		BIT(MTK_QDMA_BIT)
#define MTK_NETSYS_V2		BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628		BIT(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1	BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2		BIT(MTK_U3_COPHY_V2_BIT)

#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW		\
	BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY	\
	BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY		\
	BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII	\
	BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII	\
	BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)

/* Supported path present on SoCs */
#define MTK_ETH_PATH_GMAC1_RGMII	BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII	BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII	BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII	BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII	BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY	BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW		BIT(MTK_ETH_PATH_GDM1_ESW_BIT)

#define MTK_GMAC1_RGMII		(MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII	(MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
#define MTK_GMAC1_SGMII		(MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
#define MTK_GMAC2_RGMII		(MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII		(MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY		(MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
#define MTK_GDM1_ESW		(MTK_ETH_PATH_GDM1_ESW | MTK_ESW)

/* MUXes present on SoCs */
/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
#define MTK_MUX_GDM1_TO_GMAC1_ESW	(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)

/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GEPHY */
#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY	\
	(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)

/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
#define MTK_MUX_U3_GMAC2_TO_QPHY	\
	(MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)

/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII	\
	(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
	 MTK_SHARED_SGMII)

/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII, where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII	\
	(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)

#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))

#define MT7621_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
		      MTK_GMAC2_RGMII | MTK_SHARED_INT | \
		      MTK_TRGMII_MT7621_CLK | MTK_QDMA)

#define MT7622_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
		      MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
		      MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)

#define MT7623_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
		      MTK_QDMA)

#define MT7628_CAPS  (MTK_SHARED_INT | MTK_SOC_MT7628)

#define MT7629_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)

#define MT7981_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
		      MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)

#define MT7986_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)

struct mtk_tx_dma_desc_info {
	dma_addr_t	addr;
	u32		size;
	u16		vlan_tci;
	u16		qid;
	u8		gso:1;
	u8		csum:1;
	u8		vlan:1;
	u8		first:1;
	u8		last:1;
};
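
/* Usage sketch (illustrative): per-SoC behaviour is gated on the capability
 * bitmaps above via MTK_HAS_CAPS(), e.g.
 *
 *	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 *		setup_qdma_tx();
 *	else
 *		setup_pdma_tx();
 *
 * where setup_*_tx() are placeholders; the NETSYS_V2 checks in the inline
 * helpers near the end of this header follow the same pattern.
 */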

struct mtk_reg_map {
	u32	tx_irq_mask;
	u32	tx_irq_status;
	struct {
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	pcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	irq_status;	/* interrupt status */
		u32	irq_mask;	/* interrupt mask */
		u32	adma_rx_dbg0;
		u32	int_grp;
	} pdma;
	struct {
		u32	qtx_cfg;	/* tx queue configuration */
		u32	qtx_sch;	/* tx queue scheduler configuration */
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	qcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	fc_th;		/* flow control */
		u32	int_grp;
		u32	hred;		/* interrupt mask */
		u32	ctx_ptr;	/* tx acquire cpu pointer */
		u32	dtx_ptr;	/* tx acquire dma pointer */
		u32	crx_ptr;	/* tx release cpu pointer */
		u32	drx_ptr;	/* tx release dma pointer */
		u32	fq_head;	/* fq head pointer */
		u32	fq_tail;	/* fq tail pointer */
		u32	fq_count;	/* fq free page count */
		u32	fq_blen;	/* fq free page buffer length */
		u32	tx_sch_rate;	/* tx scheduler rate control registers */
	} qdma;
	u32	gdm1_cnt;
	u32	gdma_to_ppe;
	u32	ppe_base;
	u32	wdma_base[2];
	u32	pse_iq_sta;
	u32	pse_oq_sta;
};

/* struct mtk_soc_data - This is the structure holding all differences
 *			 among various platforms
 * @reg_map		Soc register map.
 * @ana_rgc3:		The offset for register ANA_RGC3 related to
 *			sgmiisys syscon
 * @caps		Flags showing the extra capabilities of the SoC
 * @hw_features		Flags showing the supported HW features
 * @required_clks	Bitmap of the clocks required on the target SoC
 * @required_pctl	A bool value to show whether the SoC requires
 *			the extra setup for those pins used by GMAC.
 * @hash_offset		Flow table hash offset.
 * @foe_entry_size	Foe table entry size.
 * @has_accounting	Bool indicating support for accounting of
 *			offloaded flows.
 * @txd_size		Tx DMA descriptor size.
 * @rxd_size		Rx DMA descriptor size.
 * @rx_irq_done_mask	Rx irq done register mask.
 * @rx_dma_l4_valid	Rx DMA valid register mask.
 * @dma_max_len		Max DMA tx/rx buffer length.
 * @dma_len_offset	Tx/Rx DMA length field offset.
 */
struct mtk_soc_data {
	const struct mtk_reg_map *reg_map;
	u32		ana_rgc3;
	u32		caps;
	u32		required_clks;
	bool		required_pctl;
	u8		offload_version;
	u8		hash_offset;
	u16		foe_entry_size;
	netdev_features_t hw_features;
	bool		has_accounting;
	struct {
		u32	txd_size;
		u32	rxd_size;
		u32	rx_irq_done_mask;
		u32	rx_dma_l4_valid;
		u32	dma_max_len;
		u32	dma_len_offset;
	} txrx;
};

#define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)

/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS			2
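
/* Worked reading of the txrx block in struct mtk_soc_data above: the
 * TX_DMA_PLEN0()/RX_DMA_GET_PLEN0() macros earlier in this header expand to
 * eth->soc->txrx.dma_max_len / dma_len_offset, so per-SoC values such as
 * MTK_TX_DMA_BUF_LEN (0x3fff) vs MTK_TX_DMA_BUF_LEN_V2 (0xffff) are the
 * natural candidates for dma_max_len; the authoritative per-SoC values live
 * in mtk_eth_soc.c.
 */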

/* struct mtk_eth - This is the main data structure for holding the state
 *			of the driver
 * @dev:		The device pointer
 * @dma_dev:		The device pointer used for dma mapping/alloc
 * @base:		The mapped register i/o base
 * @page_lock:		Make sure that register operations are atomic
 * @tx_irq_lock:	Make sure that IRQ register operations are atomic
 * @rx_irq_lock:	Make sure that IRQ register operations are atomic
 * @dim_lock:		Make sure that Net DIM operations are atomic
 * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
 *			dummy for NAPI to work
 * @netdev:		The netdev instances
 * @mac:		Each netdev is linked to a physical MAC
 * @irq:		The IRQ that we are using
 * @msg_enable:		Ethtool msg level
 * @ethsys:		The register map pointing at the range used to setup
 *			MII modes
 * @infra:		The register map pointing at the range used to setup
 *			SGMII and GePHY path
 * @sgmii_pcs:		Pointers to mtk-pcs-lynxi phylink_pcs instances
 * @pctl:		The register map pointing at the range used to setup
 *			GMAC port drive/slew values
 * @dma_refcnt:		track how many netdevs are using the DMA engine
 * @tx_ring:		Pointer to the memory holding info about the TX ring
 * @rx_ring:		Pointer to the memory holding info about the RX ring
 * @rx_ring_qdma:	Pointer to the memory holding info about the QDMA RX ring
 * @tx_napi:		The TX NAPI struct
 * @rx_napi:		The RX NAPI struct
 * @rx_events:		Net DIM RX event counter
 * @rx_packets:		Net DIM RX packet counter
 * @rx_bytes:		Net DIM RX byte counter
 * @rx_dim:		Net DIM RX context
 * @tx_events:		Net DIM TX event counter
 * @tx_packets:		Net DIM TX packet counter
 * @tx_bytes:		Net DIM TX byte counter
 * @tx_dim:		Net DIM TX context
 * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
 * @phy_scratch_ring:	physical address of scratch_ring
 * @scratch_head:	The scratch memory that scratch_ring points to.
 * @clks:		clock array for all clocks required
 * @mii_bus:		If there is a bus we need to create an instance for it
 * @pending_work:	The workqueue used to reset the dma ring
 * @state:		Initialization and runtime state of the device
 * @soc:		Holding specific data among various SoCs
 */
struct mtk_eth {
	struct device			*dev;
	struct device			*dma_dev;
	void __iomem			*base;
	spinlock_t			page_lock;
	spinlock_t			tx_irq_lock;
	spinlock_t			rx_irq_lock;
	struct net_device		dummy_dev;
	struct net_device		*netdev[MTK_MAX_DEVS];
	struct mtk_mac			*mac[MTK_MAX_DEVS];
	int				irq[3];
	u32				msg_enable;
	unsigned long			sysclk;
	struct regmap			*ethsys;
	struct regmap			*infra;
	struct phylink_pcs		*sgmii_pcs[MTK_MAX_DEVS];
	struct regmap			*pctl;
	bool				hwlro;
	refcount_t			dma_refcnt;
	struct mtk_tx_ring		tx_ring;
	struct mtk_rx_ring		rx_ring[MTK_MAX_RX_RING_NUM];
	struct mtk_rx_ring		rx_ring_qdma;
	struct napi_struct		tx_napi;
	struct napi_struct		rx_napi;
	void				*scratch_ring;
	dma_addr_t			phy_scratch_ring;
	void				*scratch_head;
	struct clk			*clks[MTK_CLK_MAX];

	struct mii_bus			*mii_bus;
	struct work_struct		pending_work;
	unsigned long			state;

	const struct mtk_soc_data	*soc;

	spinlock_t			dim_lock;

	u32				rx_events;
	u32				rx_packets;
	u32				rx_bytes;
	struct dim			rx_dim;

	u32				tx_events;
	u32				tx_packets;
	u32				tx_bytes;
	struct dim			tx_dim;

	int				ip_align;

	struct metadata_dst		*dsa_meta[MTK_MAX_DSA_PORTS];

	struct mtk_ppe			*ppe[2];
	struct rhashtable		flow_table;

	struct bpf_prog			__rcu *prog;

	struct {
		struct delayed_work monitor_work;
		u32 wdidx;
		u8 wdma_hang_count;
		u8 qdma_hang_count;
		u8 adma_hang_count;
	} reset;
};

/* struct mtk_mac - the structure that holds the info about the MACs of the
 *		    SoC
 * @id:			The number of the MAC
 * @interface:		Interface mode kept for detecting change in hw settings
 * @of_node:		Our devicetree node
 * @hw:			Backpointer to our main data structure
 * @hw_stats:		Packet statistics counter
 */
struct mtk_mac {
	int				id;
	phy_interface_t			interface;
	int				speed;
	struct device_node		*of_node;
	struct phylink			*phylink;
	struct phylink_config		phylink_config;
	struct mtk_eth			*hw;
	struct mtk_hw_stats		*hw_stats;
	__be32				hwlro_ip[MTK_MAX_LRO_IP_CNT];
	int				hwlro_ip_cnt;
	unsigned int			syscfg0;
	struct notifier_block		device_notifier;
};
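
/* Convention note (hedged, not quoted from the driver): each struct mtk_mac
 * is expected to live in the netdev private area, so callbacks typically
 * recover the driver state as:
 *
 *	struct mtk_mac *mac = netdev_priv(dev);
 *	struct mtk_eth *eth = mac->hw;
 */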

/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];

static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;

	return ppe->foe_table + hash * soc->foe_entry_size;
}

static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return MTK_FOE_IB1_BIND_TIMESTAMP_V2;

	return MTK_FOE_IB1_BIND_TIMESTAMP;
}

static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return MTK_FOE_IB1_BIND_PPPOE_V2;

	return MTK_FOE_IB1_BIND_PPPOE;
}

static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return MTK_FOE_IB1_BIND_VLAN_TAG_V2;

	return MTK_FOE_IB1_BIND_VLAN_TAG;
}

static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;

	return MTK_FOE_IB1_BIND_VLAN_LAYER;
}

static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);

	return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
}

static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);

	return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
}

static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return MTK_FOE_IB1_PACKET_TYPE_V2;

	return MTK_FOE_IB1_PACKET_TYPE;
}

static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);

	return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
}

static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return MTK_FOE_IB2_MULTICAST_V2;

	return MTK_FOE_IB2_MULTICAST;
}

/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);

int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);

int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data);
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index);
void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);

#endif /* MTK_ETH_H */