/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#ifndef MTK_ETH_H
#define MTK_ETH_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/phylink.h>
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
#include <net/page_pool/types.h>
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"

#define MTK_MAX_DSA_PORTS	7
#define MTK_DSA_PORT_MASK	GENMASK(2, 0)

#define MTK_QDMA_NUM_QUEUES	16
#define MTK_QDMA_PAGE_SIZE	2048
#define MTK_MAX_RX_LENGTH	1536
#define MTK_MAX_RX_LENGTH_2K	2048
#define MTK_TX_DMA_BUF_LEN	0x3fff
#define MTK_TX_DMA_BUF_LEN_V2	0xffff
#define MTK_QDMA_RING_SIZE	2048
#define MTK_DMA_SIZE		512
#define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC	0xffffffff
#define MTK_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR)
#define MTK_HW_FEATURES		(NETIF_F_IP_CSUM | \
				 NETIF_F_RXCSUM | \
				 NETIF_F_HW_VLAN_CTAG_TX | \
				 NETIF_F_SG | NETIF_F_TSO | \
				 NETIF_F_TSO6 | \
				 NETIF_F_IPV6_CSUM |\
				 NETIF_F_HW_TC)
#define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
/* next ring index; assumes ring size (Y) is a power of two */
#define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))

/* page_pool buffer layout: XDP headroom in front, skb_shared_info behind */
#define MTK_PP_HEADROOM		XDP_PACKET_HEADROOM
#define MTK_PP_PAD		(MTK_PP_HEADROOM + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MTK_PP_MAX_BUF_SIZE	(PAGE_SIZE - MTK_PP_PAD)

#define MTK_QRX_OFFSET		0x10

#define MTK_MAX_RX_RING_NUM	4
#define MTK_HW_LRO_DMA_SIZE	8

#define MTK_MAX_LRO_RX_LENGTH		(4096 * 3)
#define MTK_MAX_LRO_IP_CNT		2
#define MTK_HW_LRO_TIMER_UNIT		1	/* 20 us */
#define MTK_HW_LRO_REFRESH_TIME		50000	/* 1 sec. */
#define MTK_HW_LRO_AGG_TIME		10	/* 200us */
#define MTK_HW_LRO_AGE_TIME		50	/* 1ms */
#define MTK_HW_LRO_MAX_AGG_CNT		64
#define MTK_HW_LRO_BW_THRE		3000
#define MTK_HW_LRO_REPLACE_DELTA	1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM	1522

/* Frame Engine Global Configuration */
#define MTK_FE_GLO_CFG		0x00
#define MTK_FE_LINK_DOWN_P3	BIT(11)
#define MTK_FE_LINK_DOWN_P4	BIT(12)

/* Frame Engine Global Reset Register */
#define MTK_RST_GL		0x04
#define RST_GL_PSE		BIT(0)

/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2		0x08
#define MTK_FE_INT_ENABLE	0x0c
#define MTK_FE_INT_FQ_EMPTY	BIT(8)
#define MTK_FE_INT_TSO_FAIL	BIT(12)
#define MTK_FE_INT_TSO_ILLEGAL	BIT(13)
#define MTK_FE_INT_TSO_ALIGN	BIT(14)
#define MTK_FE_INT_RFIFO_OV	BIT(18)
#define MTK_FE_INT_RFIFO_UF	BIT(19)
#define MTK_GDM1_AF		BIT(28)
#define MTK_GDM2_AF		BIT(29)

/* PDMA HW LRO Alter Flow Timer Register */
#define MTK_PDMA_LRO_ALT_REFRESH_TIMER	0x1c

/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP		0x20

/* CDMQ Ingress Control Register */
#define MTK_CDMQ_IG_CTRL	0x1400
#define MTK_CDMQ_STAG_EN	BIT(0)

/* CDMQ Egress Control Register */
#define MTK_CDMQ_EG_CTRL	0x1404

/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL	0x400
#define MTK_CDMP_STAG_EN	BIT(0)

/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL	0x404

/* GDM Egress Control Register.
 * GMAC3 lives at a fixed offset; GMAC1/2 are strided by 0x1000.
 */
#define MTK_GDMA_FWD_CFG(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x540 : 0x500 + (_x * 0x1000); })
#define MTK_GDMA_SPECIAL_TAG	BIT(24)
#define MTK_GDMA_ICS_EN		BIT(22)
#define MTK_GDMA_TCS_EN		BIT(21)
#define MTK_GDMA_UCS_EN		BIT(20)
#define MTK_GDMA_STRP_CRC	BIT(16)
#define MTK_GDMA_TO_PDMA	0x0
#define MTK_GDMA_DROP_ALL	0x7777

/* GDM Egress Control Register */
#define MTK_GDMA_EG_CTRL(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x544 : 0x504 + (_x * 0x1000); })
#define MTK_GDMA_XGDM_SEL	BIT(31)

/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x548 : 0x508 + (_x * 0x1000); })

/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
				   0x54C : 0x50C + (_x * 0x1000); })

/* FE global misc reg */
#define MTK_FE_GLO_MISC		0x124

/* PSE Free Queue Flow Control */
#define PSE_FQFC_CFG1		0x100
#define PSE_FQFC_CFG2		0x104
#define PSE_DROP_CFG		0x108
#define PSE_PPE0_DROP		0x110

/* PSE Input Queue Reservation Register */
#define PSE_IQ_REV(x)		(0x140 + (((x) - 1) << 2))

/* PSE Output Queue Threshold Register */
#define PSE_OQ_TH(x)		(0x160 + (((x) - 1) << 2))

/* GDM and CDM Threshold */
#define MTK_GDM2_THRES		0x1530
#define MTK_CDMW0_THRES		0x164c
#define MTK_CDMW1_THRES		0x1650
#define MTK_CDME0_THRES		0x1654
#define MTK_CDME1_THRES		0x1658
#define MTK_CDMM_THRES		0x165c

/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0	0x980
#define MTK_LRO_EN		BIT(0)
#define MTK_L3_CKS_UPD_EN	BIT(7)
#define MTK_L3_CKS_UPD_EN_V2	BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE	BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ	(0x7 << 26)
#define MTK_LRO_RING_RELINQUISH_REQ_V2	(0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE	(0x7 << 29)
#define MTK_LRO_RING_RELINQUISH_DONE_V2	(0xf << 28)

#define MTK_PDMA_LRO_CTRL_DW1	0x984
#define MTK_PDMA_LRO_CTRL_DW2	0x988
#define MTK_PDMA_LRO_CTRL_DW3	0x98c
#define MTK_ADMA_MODE		BIT(15)
#define MTK_LRO_MIN_RXD_SDL	(MTK_HW_LRO_SDL_REMAIN_ROOM << 16)

#define MTK_RX_DMA_LRO_EN	BIT(8)
#define MTK_MULTI_EN		BIT(10)
#define MTK_PDMA_SIZE_8DWORDS	(1 << 4)

/* PDMA Global Configuration Register */
#define MTK_PDMA_LRO_SDL	0x3000
#define MTK_RX_CFG_SDL_OFFSET	16

/* PDMA Reset Index Register */
#define MTK_PST_DRX_IDX0	BIT(16)
#define MTK_PST_DRX_IDX_CFG(x)	(MTK_PST_DRX_IDX0 << (x))

/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_RX_MASK		GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN		BIT(15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
#define MTK_PDMA_DELAY_RX_PTIME_SHIFT	0

#define MTK_PDMA_DELAY_TX_MASK		GENMASK(31, 16)
#define MTK_PDMA_DELAY_TX_EN		BIT(31)
#define MTK_PDMA_DELAY_TX_PINT_SHIFT	24
#define MTK_PDMA_DELAY_TX_PTIME_SHIFT	16

#define MTK_PDMA_DELAY_PINT_MASK	0x7f
#define MTK_PDMA_DELAY_PTIME_MASK	0xff

/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA	0xa4c

/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0	0xb04
#define MTK_LRO_DIP_DW0_CFG(x)		(MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
#define MTK_RING_MYIP_VLD		BIT(9)

/* PDMA HW LRO Ring Control Registers */
#define MTK_LRO_RX_RING0_CTRL_DW1	0xb28
#define MTK_LRO_RX_RING0_CTRL_DW2	0xb2c
#define MTK_LRO_RX_RING0_CTRL_DW3	0xb30
#define MTK_LRO_CTRL_DW1_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
#define MTK_LRO_CTRL_DW2_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
#define MTK_LRO_CTRL_DW3_CFG(x)		(MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
#define MTK_RING_AGE_TIME_L		((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
#define MTK_RING_AGE_TIME_H		((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
/* sic: "LERAN" — misspelled "LEARN", kept to preserve the existing name */
#define MTK_RING_AUTO_LERAN_MODE	(3 << 6)
#define MTK_RING_VLD			BIT(8)
#define MTK_RING_MAX_AGG_TIME		((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
#define MTK_RING_MAX_AGG_CNT_L		((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
#define MTK_RING_MAX_AGG_CNT_H		((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)

/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_OFFSET		0x10
#define QDMA_RES_THRES		4

/* QDMA Tx Queue Scheduler Configuration Registers */
#define MTK_QTX_SCH_TX_SEL		BIT(31)
#define MTK_QTX_SCH_TX_SEL_V2		GENMASK(31, 30)

#define MTK_QTX_SCH_LEAKY_BUCKET_EN	BIT(30)
#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE	GENMASK(29, 28)
#define MTK_QTX_SCH_MIN_RATE_EN		BIT(27)
#define MTK_QTX_SCH_MIN_RATE_MAN	GENMASK(26, 20)
#define MTK_QTX_SCH_MIN_RATE_EXP	GENMASK(19, 16)
#define MTK_QTX_SCH_MAX_RATE_WEIGHT	GENMASK(15, 12)
#define MTK_QTX_SCH_MAX_RATE_EN		BIT(11)
#define MTK_QTX_SCH_MAX_RATE_MAN	GENMASK(10, 4)
#define MTK_QTX_SCH_MAX_RATE_EXP	GENMASK(3, 0)

/* QDMA TX Scheduler Rate Control Register */
#define MTK_QDMA_TX_SCH_MAX_WFQ	BIT(15)

/* QDMA Global Configuration Register */
#define MTK_RX_2B_OFFSET	BIT(31)
#define MTK_RX_BT_32DWORDS	(3 << 11)
#define MTK_NDP_CO_PRO		BIT(10)
#define MTK_TX_WB_DDONE		BIT(6)
#define MTK_TX_BT_32DWORDS	(3 << 4)
#define MTK_RX_DMA_BUSY		BIT(3)
#define MTK_TX_DMA_BUSY		BIT(1)
#define MTK_RX_DMA_EN		BIT(2)
#define MTK_TX_DMA_EN		BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US	1000000

/* QDMA V2 Global Configuration Register */
#define MTK_CHK_DDONE_EN	BIT(28)
#define MTK_DMAD_WR_WDONE	BIT(26)
#define MTK_WCOMP_EN		BIT(24)
#define MTK_RESV_BUF		(0x40 << 16)
/* sic: "MUTLI" — misspelled "MULTI", kept to preserve the existing name */
#define MTK_MUTLI_CNT		(0x4 << 12)
#define MTK_LEAKY_BUCKET_EN	BIT(11)

/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE	BIT(20)
#define FC_THRES_DROP_EN	(7 << 16)
#define FC_THRES_MIN		0x4444

/* QDMA Interrupt Status Register */
#define MTK_RX_DONE_DLY		BIT(30)
#define MTK_TX_DONE_DLY		BIT(28)
#define MTK_RX_DONE_INT3	BIT(19)
#define MTK_RX_DONE_INT2	BIT(18)
#define MTK_RX_DONE_INT1	BIT(17)
#define MTK_RX_DONE_INT0	BIT(16)
#define MTK_TX_DONE_INT3	BIT(3)
#define MTK_TX_DONE_INT2	BIT(2)
#define MTK_TX_DONE_INT1	BIT(1)
#define MTK_TX_DONE_INT0	BIT(0)
#define MTK_RX_DONE_INT		MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT		MTK_TX_DONE_DLY

#define MTK_RX_DONE_INT_V2	BIT(14)

#define MTK_CDM_TXFIFO_RDY	BIT(7)

/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT	BIT(0)

/* QDMA TX NUM */
#define QID_BITS_V2(x)		(((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID	8

#define MTK_TX_DMA_BUF_SHIFT	8

/* QDMA V2 descriptor txd6 */
#define TX_DMA_INS_VLAN_V2	BIT(16)
/* QDMA V2 descriptor txd5 */
#define TX_DMA_CHKSUM_V2	(0x7 << 28)
#define TX_DMA_TSO_V2		BIT(31)

#define TX_DMA_SPTAG_V3		BIT(27)

/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2	8
#define TX_DMA_FPORT_MASK_V2	0xf
#define TX_DMA_SWC_V2		BIT(30)

/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM		(0x7 << 29)
#define TX_DMA_TSO		BIT(28)
#define TX_DMA_FPORT_SHIFT	25
#define TX_DMA_FPORT_MASK	0x7
#define TX_DMA_INS_VLAN		BIT(16)

/* QDMA descriptor txd3.
 * NOTE: the PLEN macros implicitly use a local "struct mtk_eth *eth"
 * variable at the expansion site to pick up per-SoC length limits.
 */
#define TX_DMA_OWNER_CPU	BIT(31)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_PLEN0(x)		(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(x)		((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC		BIT(14)
#define TX_DMA_PQID		GENMASK(3, 0)

/* PDMA on MT7628 */
#define TX_DMA_DONE		BIT(31)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)

/* QDMA descriptor rxd2 (same implicit "eth" convention as TX_DMA_PLEN0) */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG		BIT(15)

/* QDMA descriptor rxd3 */
#define RX_DMA_VID(x)		((x) & VLAN_VID_MASK)
#define RX_DMA_TCI(x)		((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
#define RX_DMA_VPID(x)		(((x) >> 16) & 0xffff)

/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY	GENMASK(13, 0)
#define MTK_RXD4_PPE_CPU_REASON	GENMASK(18, 14)
#define MTK_RXD4_SRC_PORT	GENMASK(21, 19)
#define MTK_RXD4_ALG		GENMASK(31, 22)

/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID		BIT(24)
#define RX_DMA_L4_VALID_PDMA	BIT(30)		/* when PDMA is used */
#define RX_DMA_SPECIAL_TAG	BIT(22)

/* PDMA descriptor rxd5 */
#define MTK_RXD5_FOE_ENTRY	GENMASK(14, 0)
#define MTK_RXD5_PPE_CPU_REASON	GENMASK(22, 18)
#define MTK_RXD5_SRC_PORT	GENMASK(29, 26)

#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0x7)
#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0xf)

/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2		BIT(0)
#define RX_DMA_L4_VALID_V2	BIT(2)

/* PHY Polling and SMI Master Control registers */
#define MTK_PPSC		0x10000
#define PPSC_MDC_CFG		GENMASK(29, 24)
#define PPSC_MDC_TURBO		BIT(20)
#define MDC_MAX_FREQ		25000000
#define MDC_MAX_DIVIDER		63

/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC		0x10004
#define PHY_IAC_ACCESS		BIT(31)
#define PHY_IAC_REG_MASK	GENMASK(29, 25)
#define PHY_IAC_REG(x)		FIELD_PREP(PHY_IAC_REG_MASK, (x))
#define PHY_IAC_ADDR_MASK	GENMASK(24, 20)
#define PHY_IAC_ADDR(x)		FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
#define PHY_IAC_CMD_MASK	GENMASK(19, 18)
#define PHY_IAC_CMD_C45_ADDR	FIELD_PREP(PHY_IAC_CMD_MASK, 0)
#define PHY_IAC_CMD_WRITE	FIELD_PREP(PHY_IAC_CMD_MASK, 1)
#define PHY_IAC_CMD_C22_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 2)
#define PHY_IAC_CMD_C45_READ	FIELD_PREP(PHY_IAC_CMD_MASK, 3)
#define PHY_IAC_START_MASK	GENMASK(17, 16)
#define PHY_IAC_START_C45	FIELD_PREP(PHY_IAC_START_MASK, 0)
#define PHY_IAC_START_C22	FIELD_PREP(PHY_IAC_START_MASK, 1)
#define PHY_IAC_DATA_MASK	GENMASK(15, 0)
#define PHY_IAC_DATA(x)		FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT		HZ

#define MTK_MAC_MISC		0x1000c
#define MTK_MAC_MISC_V3		0x10010
#define MTK_MUX_TO_ESW		BIT(0)
#define MISC_MDC_TURBO		BIT(4)

/* XMAC status registers */
#define MTK_XGMAC_STS(x)	(((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
#define MTK_XGMAC_FORCE_LINK(x)	(((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
#define MTK_USXGMII_PCS_LINK	BIT(8)
#define MTK_XGMAC_RX_FC		BIT(5)
#define MTK_XGMAC_TX_FC		BIT(4)
#define MTK_USXGMII_PCS_MODE	GENMASK(3, 1)
#define MTK_XGMAC_LINK_STS	BIT(0)

/* GSW bridge registers */
#define MTK_GSW_CFG		(0x10080)
#define GSWTX_IPG_MASK		GENMASK(19, 16)
#define GSWTX_IPG_SHIFT		16
#define GSWRX_IPG_MASK		GENMASK(3, 0)
#define GSWRX_IPG_SHIFT		0
#define GSW_IPG_11		11

/* Mac control registers */
#define MTK_MAC_MCR(x)		(0x10100 + (x * 0x100))
#define MAC_MCR_MAX_RX_MASK	GENMASK(25, 24)
#define MAC_MCR_MAX_RX(_x)	(MAC_MCR_MAX_RX_MASK & ((_x) << 24))
#define MAC_MCR_MAX_RX_1518	0x0
#define MAC_MCR_MAX_RX_1536	0x1
#define MAC_MCR_MAX_RX_1552	0x2
#define MAC_MCR_MAX_RX_2048	0x3
#define MAC_MCR_IPG_CFG		(BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE	BIT(15)
#define MAC_MCR_TX_EN		BIT(14)
#define MAC_MCR_RX_EN		BIT(13)
#define MAC_MCR_RX_FIFO_CLR_DIS	BIT(12)
#define MAC_MCR_BACKOFF_EN	BIT(9)
#define MAC_MCR_BACKPR_EN	BIT(8)
#define MAC_MCR_FORCE_RX_FC	BIT(5)
#define MAC_MCR_FORCE_TX_FC	BIT(4)
#define MAC_MCR_SPEED_1000	BIT(3)
#define MAC_MCR_SPEED_100	BIT(2)
#define MAC_MCR_FORCE_DPX	BIT(1)
#define MAC_MCR_FORCE_LINK	BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN	(MAC_MCR_FORCE_MODE)

/* Mac status registers */
#define MTK_MAC_MSR(x)		(0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G		BIT(7)
#define MAC_MSR_EEE100M		BIT(6)
#define MAC_MSR_RX_FC		BIT(5)
#define MAC_MSR_TX_FC		BIT(4)
#define MAC_MSR_SPEED_1000	BIT(3)
#define MAC_MSR_SPEED_100	BIT(2)
#define MAC_MSR_SPEED_MASK	(MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
#define MAC_MSR_DPX		BIT(1)
#define MAC_MSR_LINK		BIT(0)

/* TRGMII RXC control register */
#define TRGMII_RCK_CTRL		0x10300
#define DQSI0(x)		((x << 0) & GENMASK(6, 0))
#define DQSI1(x)		((x << 8) & GENMASK(14, 8))
#define RXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define RXC_RST			BIT(31)
#define RXC_DQSISEL		BIT(30)
#define RCK_CTRL_RGMII_1000	(RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
#define RCK_CTRL_RGMII_10_100	RXCTL_DMWTLAT(2)

#define NUM_TRGMII_CTRL		5

/* TRGMII TXC control register */
#define TRGMII_TCK_CTRL		0x10340
#define TXCTL_DMWTLAT(x)	((x << 16) & GENMASK(18, 16))
#define TXC_INV			BIT(30)
#define TCK_CTRL_RGMII_1000	TXCTL_DMWTLAT(2)
#define TCK_CTRL_RGMII_10_100	(TXC_INV | TXCTL_DMWTLAT(2))

/* TRGMII TX Drive Strength */
#define TRGMII_TD_ODT(i)	(0x10354 + 8 * (i))
#define TD_DM_DRVP(x)		((x) & 0xf)
#define TD_DM_DRVN(x)		(((x) & 0xf) << 4)

/* TRGMII Interface mode register */
#define INTF_MODE		0x10390
#define TRGMII_INTF_DIS		BIT(0)
#define TRGMII_MODE		BIT(1)
#define TRGMII_CENTRAL_ALIGNED	BIT(2)
#define INTF_MODE_RGMII_1000	(TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100	0

/* GPIO port control registers for GMAC 2 */
#define GPIO_OD33_CTRL8		0x4c0
#define GPIO_BIAS_CTRL		0xed0
#define GPIO_DRV_SEL10		0xf00

/* ethernet subsystem chip id register */
#define ETHSYS_CHIPID0_3	0x0
#define ETHSYS_CHIPID4_7	0x4
#define MT7623_ETH		7623
#define MT7622_ETH		7622
#define MT7621_ETH		7621

/* ethernet system control register */
#define ETHSYS_SYSCFG		0x10
#define SYSCFG_DRAM_TYPE_DDR2	BIT(4)

/* ethernet subsystem config register */
#define ETHSYS_SYSCFG0		0x14
#define SYSCFG0_GE_MASK		0x3
#define SYSCFG0_GE_MODE(x, y)	(x << (12 + (y * 2)))
#define SYSCFG0_SGMII_MASK	GENMASK(9, 7)
#define SYSCFG0_SGMII_GMAC1	((2 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2	((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2	BIT(9)
#define SYSCFG0_SGMII_GMAC2_V2	BIT(8)


/* ethernet subsystem clock register */
#define ETHSYS_CLKCFG0			0x2c
#define ETHSYS_TRGMII_CLK_SEL362_5	BIT(11)
#define ETHSYS_TRGMII_MT7621_MASK	(BIT(5) | BIT(6))
#define ETHSYS_TRGMII_MT7621_APLL	BIT(6)
#define ETHSYS_TRGMII_MT7621_DDR_PLL	BIT(5)

/* ethernet reset control register.
 * NOTE(review): RSTCTRL_PPE0 and RSTCTRL_PPE1 are both BIT(31); on v2 SoCs
 * PPE0 moves to BIT(30) (RSTCTRL_PPE0_V2) — confirm against the datasheet
 * before touching these values.
 */
#define ETHSYS_RSTCTRL		0x34
#define RSTCTRL_FE		BIT(6)
#define RSTCTRL_PPE0		BIT(31)
#define RSTCTRL_PPE0_V2		BIT(30)
#define RSTCTRL_PPE1		BIT(31)
#define RSTCTRL_ETH		BIT(23)

/* ethernet reset check idle register */
#define ETHSYS_FE_RST_CHK_IDLE_EN	0x28

/* ethernet dma channel agent map */
#define ETHSYS_DMA_AG_MAP	0x408
#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)

/* Infrasys subsystem config registers */
#define INFRA_MISC2		0x70c
#define CO_QPHY_SEL		BIT(0)
#define GEPHY_MAC_SEL		BIT(1)

/* Top misc registers */
#define USB_PHY_SWITCH_REG	0x218
#define QPHY_SEL_MASK		GENMASK(1, 0)
#define SGMII_QPHY_SEL		0x2

/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET	0x0800
#define MT7628_SDM_OFFSET	0x0c00

#define MT7628_TX_BASE_PTR0	(MT7628_PDMA_OFFSET + 0x00)
#define MT7628_TX_MAX_CNT0	(MT7628_PDMA_OFFSET + 0x04)
#define MT7628_TX_CTX_IDX0	(MT7628_PDMA_OFFSET + 0x08)
#define MT7628_TX_DTX_IDX0	(MT7628_PDMA_OFFSET + 0x0c)
#define MT7628_PST_DTX_IDX0	BIT(0)

#define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)

/* Counter / stat register */
#define MT7628_SDM_TPCNT	(MT7628_SDM_OFFSET + 0x100)
#define MT7628_SDM_TBCNT	(MT7628_SDM_OFFSET + 0x104)
#define MT7628_SDM_RPCNT	(MT7628_SDM_OFFSET + 0x108)
#define MT7628_SDM_RBCNT	(MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR	(MT7628_SDM_OFFSET + 0x110)

#define MTK_FE_CDM1_FSM		0x220
#define MTK_FE_CDM2_FSM		0x224
#define MTK_FE_CDM3_FSM		0x238
#define MTK_FE_CDM4_FSM		0x298
#define MTK_FE_CDM5_FSM		0x318
#define MTK_FE_CDM6_FSM		0x328
#define MTK_FE_GDM1_FSM		0x228
#define MTK_FE_GDM2_FSM		0x22C

#define MTK_MAC_FSM(x)		(0x1010C + ((x) * 0x100))

/* 4-word hardware RX descriptor */
struct mtk_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

/* 8-word hardware RX descriptor (v2 DMA engines) */
struct mtk_rx_dma_v2 {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
	unsigned int rxd5;
	unsigned int rxd6;
	unsigned int rxd7;
	unsigned int rxd8;
} __packed __aligned(4);

/* 4-word hardware TX descriptor */
struct mtk_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

/* 8-word hardware TX descriptor (v2 DMA engines) */
struct mtk_tx_dma_v2 {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
	unsigned int txd5;
	unsigned int txd6;
	unsigned int txd7;
	unsigned int txd8;
} __packed __aligned(4);

struct mtk_eth;
struct mtk_mac;

/* per-device XDP counters, folded into mtk_hw_stats */
struct mtk_xdp_stats {
	u64 rx_xdp_redirect;
	u64 rx_xdp_pass;
	u64 rx_xdp_drop;
	u64 rx_xdp_tx;
	u64 rx_xdp_tx_errors;
	u64 tx_xdp_xmit;
	u64 tx_xdp_xmit_errors;
};

/* struct mtk_hw_stats - the structure that holds the traffic statistics.
 * @stats_lock:	make sure that stats operations are atomic
 * @reg_offset:	the status register offset of the SoC
 * @syncp:	u64_stats_sync sequence counter, used to read the 64-bit
 *		counters consistently on 32-bit systems
 *
 * All of the supported SoCs have hardware counters for traffic statistics.
 * Whenever the status IRQ triggers we can read the latest stats from these
 * counters and store them in this struct.
 */
struct mtk_hw_stats {
	u64 tx_bytes;
	u64 tx_packets;
	u64 tx_skip;
	u64 tx_collisions;
	u64 rx_bytes;
	u64 rx_packets;
	u64 rx_overflow;
	u64 rx_fcs_errors;
	u64 rx_short_errors;
	u64 rx_long_errors;
	u64 rx_checksum_errors;
	u64 rx_flow_control_packets;

	struct mtk_xdp_stats	xdp_stats;

	spinlock_t		stats_lock;
	u32			reg_offset;
	struct u64_stats_sync	syncp;
};

enum mtk_tx_flags {
	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
	 * track how memory was allocated so that it can be freed properly.
	 */
	MTK_TX_FLAGS_SINGLE0	= 0x01,
	MTK_TX_FLAGS_PAGE0	= 0x02,
};

/* This enum allows us to identify how the clock is defined on the array of the
 * clock in the order
 */
enum mtk_clks_map {
	MTK_CLK_ETHIF,
	MTK_CLK_SGMIITOP,
	MTK_CLK_ESW,
	MTK_CLK_GP0,
	MTK_CLK_GP1,
	MTK_CLK_GP2,
	MTK_CLK_GP3,
	MTK_CLK_XGP1,
	MTK_CLK_XGP2,
	MTK_CLK_XGP3,
	MTK_CLK_CRYPTO,
	MTK_CLK_FE,
	MTK_CLK_TRGPLL,
	MTK_CLK_SGMII_TX_250M,
	MTK_CLK_SGMII_RX_250M,
	MTK_CLK_SGMII_CDR_REF,
	MTK_CLK_SGMII_CDR_FB,
	MTK_CLK_SGMII2_TX_250M,
	MTK_CLK_SGMII2_RX_250M,
	MTK_CLK_SGMII2_CDR_REF,
	MTK_CLK_SGMII2_CDR_FB,
	MTK_CLK_SGMII_CK,
	MTK_CLK_ETH2PLL,
	MTK_CLK_WOCPU0,
	MTK_CLK_WOCPU1,
	MTK_CLK_NETSYS0,
	MTK_CLK_NETSYS1,
	MTK_CLK_ETHWARP_WOCPU2,
	MTK_CLK_ETHWARP_WOCPU1,
	MTK_CLK_ETHWARP_WOCPU0,
	MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
	MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
	MTK_CLK_TOP_SGM_0_SEL,
	MTK_CLK_TOP_SGM_1_SEL,
	MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
	MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
	MTK_CLK_TOP_ETH_GMII_SEL,
	MTK_CLK_TOP_ETH_REFCK_50M_SEL,
	MTK_CLK_TOP_ETH_SYS_200M_SEL,
	MTK_CLK_TOP_ETH_SYS_SEL,
	MTK_CLK_TOP_ETH_XGMII_SEL,
	MTK_CLK_TOP_ETH_MII_SEL,
	MTK_CLK_TOP_NETSYS_SEL,
	MTK_CLK_TOP_NETSYS_500M_SEL,
	MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
	MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
	MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
	MTK_CLK_TOP_NETSYS_WARP_SEL,
	MTK_CLK_MAX
};

/* per-SoC bitmaps of the clocks (from enum mtk_clks_map) each chip needs */
#define MT7623_CLKS_BITMAP	(BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_TRGPLL))
#define MT7622_CLKS_BITMAP	(BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII_CK) | \
				 BIT_ULL(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP	(0)
#define MT7628_CLKS_BITMAP	(0)
#define MT7629_CLKS_BITMAP	(BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII_CK) | \
				 BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
#define MT7981_CLKS_BITMAP	(BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_WOCPU0) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII_CK))
#define MT7986_CLKS_BITMAP	(BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_GP1) | \
				 BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
				 BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
#define MT7988_CLKS_BITMAP	(BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
				 BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
				 BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
				 BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
				 BIT_ULL(MTK_CLK_CRYPTO) | \
				 BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
				 BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
				 BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
				 BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
				 BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
				 BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
				 BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))

enum mtk_dev_state {
	MTK_HW_INIT,
	MTK_RESETTING
};

/* PSE Port Definition */
enum mtk_pse_port {
	PSE_ADMA_PORT = 0,
	PSE_GDM1_PORT,
	PSE_GDM2_PORT,
	PSE_PPE0_PORT,
	PSE_PPE1_PORT,
	PSE_QDMA_TX_PORT,
	PSE_QDMA_RX_PORT,
	PSE_DROP_PORT,
	PSE_WDMA0_PORT,
	PSE_WDMA1_PORT,
	PSE_TDMA_PORT,
	PSE_NONE_PORT,
	PSE_PPE2_PORT,
	PSE_WDMA2_PORT,
	PSE_EIP197_PORT,
	PSE_GDM3_PORT,
	PSE_PORT_MAX
};

/* GMAC Identifier */
enum mtk_gmac_id {
	MTK_GMAC1_ID = 0,
	MTK_GMAC2_ID,
	MTK_GMAC3_ID,
	MTK_GMAC_ID_MAX
};

enum mtk_tx_buf_type {
	MTK_TYPE_SKB,
	MTK_TYPE_XDP_TX,
	MTK_TYPE_XDP_NDO,
};

/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
 * by the TX descriptors
 * @type:	kind of buffer queued (SKB or XDP frame), see mtk_tx_buf_type
 * @data:	pointer to the SKB or XDP frame being sent
 * @mac_id:	index of the MAC the packet is sent through
 * @flags:	MTK_TX_FLAGS_* allocation-tracking flags
 * @dma_addr0:	The base addr of the first segment
 * @dma_len0:	The length of the first segment
 * @dma_addr1:	The base addr of the second segment
 * @dma_len1:	The length of the second segment
 */
struct mtk_tx_buf {
	enum mtk_tx_buf_type type;
	void *data;

	u16 mac_id;
	u16 flags;
	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
	DEFINE_DMA_UNMAP_LEN(dma_len0);
	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
	DEFINE_DMA_UNMAP_LEN(dma_len1);
};

/* struct mtk_tx_ring - This struct holds info describing a TX ring
 * @dma:		The descriptor ring
 * @buf:		The memory pointed at by the ring
 * @phys:		The physical addr of tx_buf
 * @next_free:		Pointer to the next free descriptor
 * @last_free:		Pointer to the last free descriptor
 * @last_free_ptr:	Hardware pointer value of the last free descriptor
 * @thresh:		The threshold of minimum amount of free descriptors
 * @free_count:		QDMA uses a linked list. Track how many free descriptors
 *			are present
 * @dma_size:		number of descriptors in the ring
 * @dma_pdma:		PDMA shadow ring for MT7628/88
 * @phys_pdma:		physical address of the PDMA shadow ring
 * @cpu_idx:		PDMA CPU index (MT7628/88 only)
 */
struct mtk_tx_ring {
	void *dma;
	struct mtk_tx_buf *buf;
	dma_addr_t phys;
	struct mtk_tx_dma *next_free;
	struct mtk_tx_dma *last_free;
	u32 last_free_ptr;
	u16 thresh;
	atomic_t free_count;
	int dma_size;
	struct mtk_tx_dma *dma_pdma;	/* For MT7628/88 PDMA handling */
	dma_addr_t phys_pdma;
	int cpu_idx;
};

/* PDMA rx ring mode */
enum mtk_rx_flags {
	MTK_RX_FLAGS_NORMAL = 0,
	MTK_RX_FLAGS_HWLRO,
	MTK_RX_FLAGS_QDMA,
};

/* struct mtk_rx_ring - This struct holds info describing a RX ring
 * @dma:		The descriptor ring
 * @data:		The memory pointed at by the ring
 * @phys:		The physical addr of rx_buf
 * @frag_size:		How big can each fragment be
 * @buf_size:		The size of each packet buffer
 * @calc_idx:		The current head of ring
 * @dma_size:		number of descriptors in the ring
 * @calc_idx_update:	whether calc_idx needs to be flushed to hardware
 * @crx_idx_reg:	CPU RX index register offset for this ring
 * @page_pool:		page_pool backing the RX buffers (XDP path)
 * @xdp_q:		XDP RX queue info registered for this ring
 */
struct mtk_rx_ring {
	void *dma;
	u8 **data;
	dma_addr_t phys;
	u16 frag_size;
	u16 buf_size;
	u16 dma_size;
	bool calc_idx_update;
	u16 calc_idx;
	u32 crx_idx_reg;
	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_q;
};

/* sic: "mkt" — misspelled "mtk", kept to preserve the existing tag name */
enum mkt_eth_capabilities {
	MTK_RGMII_BIT = 0,
	MTK_TRGMII_BIT,
	MTK_SGMII_BIT,
	MTK_ESW_BIT,
	MTK_GEPHY_BIT,
	MTK_MUX_BIT,
	MTK_INFRA_BIT,
	MTK_SHARED_SGMII_BIT,
	MTK_HWLRO_BIT,
	MTK_SHARED_INT_BIT,
	MTK_TRGMII_MT7621_CLK_BIT,
	MTK_QDMA_BIT,
	MTK_SOC_MT7628_BIT,
	MTK_RSTCTRL_PPE1_BIT,
	MTK_U3_COPHY_V2_BIT,

	/* MUX BITS*/
	MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
	MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
	MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
	MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
	MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,

	/* PATH BITS */
	MTK_ETH_PATH_GMAC1_RGMII_BIT,
	MTK_ETH_PATH_GMAC1_TRGMII_BIT,
	MTK_ETH_PATH_GMAC1_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_RGMII_BIT,
	MTK_ETH_PATH_GMAC2_SGMII_BIT,
	MTK_ETH_PATH_GMAC2_GEPHY_BIT,
	MTK_ETH_PATH_GDM1_ESW_BIT,
};

/* Supported hardware group on SoCs */
#define MTK_RGMII		BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII		BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII		BIT_ULL(MTK_SGMII_BIT)
#define MTK_ESW			BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY		BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX			BIT_ULL(MTK_MUX_BIT)
#define MTK_INFRA		BIT_ULL(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII	BIT_ULL(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO		BIT_ULL(MTK_HWLRO_BIT)
#define MTK_SHARED_INT		BIT_ULL(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK	BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA		BIT_ULL(MTK_QDMA_BIT)
#define MTK_SOC_MT7628		BIT_ULL(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1	BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2		BIT_ULL(MTK_U3_COPHY_V2_BIT)

#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW		\
	BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY	\
	BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY		\
	BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII	\
	BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII	\
	BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)

/* Supported path present on SoCs */
#define MTK_ETH_PATH_GMAC1_RGMII	BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII	BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII	BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII	BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII	BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY	BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW		BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)

#define MTK_GMAC1_RGMII		(MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII	(MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
#define MTK_GMAC1_SGMII		(MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
#define MTK_GMAC2_RGMII		(MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII		(MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY		(MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
#define MTK_GDM1_ESW		(MTK_ETH_PATH_GDM1_ESW | MTK_ESW)

/* MUXes present on SoCs */
/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
#define MTK_MUX_GDM1_TO_GMAC1_ESW	(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)

/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
	(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)

/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
#define MTK_MUX_U3_GMAC2_TO_QPHY \
	(MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)

/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
	(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
	MTK_SHARED_SGMII)

/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
	(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)

/* true iff every capability bit in _x is set in caps */
#define MTK_HAS_CAPS(caps, _x)		(((caps) & (_x)) == (_x))

#define MT7621_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
		      MTK_GMAC2_RGMII | MTK_SHARED_INT | \
		      MTK_TRGMII_MT7621_CLK | MTK_QDMA)

#define MT7622_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
		      MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
		      MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)

#define MT7623_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
		      MTK_QDMA)

#define MT7628_CAPS  (MTK_SHARED_INT | MTK_SOC_MT7628)

#define MT7629_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
		      MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)

#define MT7981_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
		      MTK_RSTCTRL_PPE1)

#define MT7986_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
		      MTK_RSTCTRL_PPE1)

#define MT7988_CAPS  (MTK_GDM1_ESW | MTK_QDMA | MTK_RSTCTRL_PPE1)

/* Per-frame TX metadata gathered by the driver before it writes the HW
 * descriptor.
 */
struct mtk_tx_dma_desc_info {
	dma_addr_t	addr;		/* DMA address of the data buffer */
	u32		size;		/* buffer length in bytes */
	u16		vlan_tci;	/* VLAN tag to insert when @vlan is set */
	u16		qid;		/* TX queue id */
	u8		gso:1;		/* GSO/TSO requested */
	u8		csum:1;		/* HW checksum offload requested */
	u8		vlan:1;		/* HW VLAN insertion requested */
	u8		first:1;	/* first descriptor of the frame */
	u8		last:1;		/* last descriptor of the frame */
};

/* Per-SoC register offset map; one instance per NETSYS generation */
struct mtk_reg_map {
	u32	tx_irq_mask;
	u32	tx_irq_status;
	struct {
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	pcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	irq_status;	/* interrupt status */
		u32	irq_mask;	/* interrupt mask */
		u32	adma_rx_dbg0;
		u32	int_grp;
	} pdma;
	struct {
		u32	qtx_cfg;	/* tx queue configuration */
		u32	qtx_sch;	/* tx queue scheduler configuration */
		u32	rx_ptr;		/* rx base pointer */
		u32	rx_cnt_cfg;	/* rx max count configuration */
		u32	qcrx_ptr;	/* rx cpu pointer */
		u32	glo_cfg;	/* global configuration */
		u32	rst_idx;	/* reset index */
		u32	delay_irq;	/* delay interrupt */
		u32	fc_th;		/* flow control */
		u32	int_grp;
		u32	hred;		/* NOTE(review): original comment said
					 * "interrupt mask" — looks misnamed for
					 * this register; verify vs datasheet
					 */
		u32	ctx_ptr;	/* tx acquire cpu pointer */
		u32	dtx_ptr;	/* tx acquire dma pointer */
		u32	crx_ptr;	/* tx release cpu pointer */
		u32	drx_ptr;	/* tx release dma pointer */
		u32	fq_head;	/* fq head pointer */
		u32	fq_tail;	/* fq tail pointer */
		u32	fq_count;	/* fq free page count */
		u32	fq_blen;	/* fq free page buffer length */
		u32	tx_sch_rate;	/* tx scheduler rate control registers */
	} qdma;
	u32	gdm1_cnt;
	u32	gdma_to_ppe;
	u32	ppe_base;
	u32	wdma_base[2];
	u32	pse_iq_sta;
	u32	pse_oq_sta;
};

/* struct mtk_soc_data -	This is the structure holding all differences
 *				among various platforms
 * @reg_map			Soc register map.
 * @ana_rgc3:			The offset for register ANA_RGC3 related to
 *				sgmiisys syscon
 * @caps			Flags shown the extra capability for the SoC
 * @hw_features			Flags shown HW features
 * @required_clks		Flags shown the bitmap for required clocks on
 *				the target SoC
 * @required_pctl		A bool value to show whether the SoC requires
 *				the extra setup for those pins used by GMAC.
 * @offload_version		PPE flow-offload version; 0 when flow offload
 *				is unsupported — TODO confirm against users
 * @hash_offset			Flow table hash offset.
 * @version			SoC version.
 * @foe_entry_size		Foe table entry size.
 * @has_accounting		Bool indicating support for accounting of
 *				offloaded flows.
 * @disable_pll_modes		NOTE(review): semantics not visible here;
 *				presumably skips PLL-based interface modes
 * @txd_size			Tx DMA descriptor size.
 * @rxd_size			Rx DMA descriptor size.
 * @rx_irq_done_mask		Rx irq done register mask.
 * @rx_dma_l4_valid		Rx DMA valid register mask.
 * @dma_max_len			Max DMA tx/rx buffer length.
 * @dma_len_offset		Tx/Rx DMA length field offset.
 */
struct mtk_soc_data {
	const struct mtk_reg_map *reg_map;
	u32		ana_rgc3;
	u64		caps;
	u64		required_clks;
	bool		required_pctl;
	u8		offload_version;
	u8		hash_offset;
	u8		version;
	u16		foe_entry_size;
	netdev_features_t hw_features;
	bool		has_accounting;
	bool		disable_pll_modes;
	struct {
		u32	txd_size;
		u32	rxd_size;
		u32	rx_irq_done_mask;
		u32	rx_dma_l4_valid;
		u32	dma_max_len;
		u32	dma_len_offset;
	} txrx;
};

#define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)

/* currently no SoC has more than 3 macs */
#define MTK_MAX_DEVS	3

/* struct mtk_eth -	This is the main data structure for holding the state
 *			of the driver
 * @dev:		The device pointer
 * @dma_dev:		The device pointer used for dma mapping/alloc
 * @base:		The mapped register i/o base
 * @page_lock:		Make sure that register operations are atomic
 * @tx_irq_lock:	Make sure that IRQ register operations are atomic
 * @rx_irq_lock:	Make sure that IRQ register operations are atomic
 * @dim_lock:		Make sure that Net DIM operations are atomic
 * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
 *			dummy for NAPI to work
 * @netdev:		The netdev instances
 * @mac:		Each netdev is linked to a physical MAC
 * @irq:		The IRQ that we are using
 * @msg_enable:		Ethtool msg level
 * @ethsys:		The register map pointing at the range used to setup
 *			MII modes
 * @infra:		The register map pointing at the range used to setup
 *			SGMII and GePHY path
 * @sgmii_pcs:		Pointers to mtk-pcs-lynxi phylink_pcs instances
 * @pctl:		The register map pointing at the range used to setup
 *			GMAC port drive/slew values
 * @dma_refcnt:		track how many netdevs are using the DMA engine
 * @tx_ring:		Pointer to the memory holding info about the TX ring
 * @rx_ring:		Pointer to the memory holding info about the RX ring
 *
 * @rx_ring_qdma:	Pointer to the memory holding info about the QDMA RX ring
 * @tx_napi:		The TX NAPI struct
 * @rx_napi:		The RX NAPI struct
 * @rx_events:		Net DIM RX event counter
 * @rx_packets:		Net DIM RX packet counter
 * @rx_bytes:		Net DIM RX byte counter
 * @rx_dim:		Net DIM RX context
 * @tx_events:		Net DIM TX event counter
 * @tx_packets:		Net DIM TX packet counter
 * @tx_bytes:		Net DIM TX byte counter
 * @tx_dim:		Net DIM TX context
 * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
 * @phy_scratch_ring:	physical address of scratch_ring
 * @scratch_head:	The scratch memory that scratch_ring points to.
 * @clks:		clock array for all clocks required
 * @mii_bus:		If there is a bus we need to create an instance for it
 * @pending_work:	The workqueue used to reset the dma ring
 * @state:		Initialization and runtime state of the device
 * @soc:		Holding specific data among various SoCs
 */
struct mtk_eth {
	struct device			*dev;
	struct device			*dma_dev;
	void __iomem			*base;
	spinlock_t			page_lock;
	spinlock_t			tx_irq_lock;
	spinlock_t			rx_irq_lock;
	struct net_device		dummy_dev;
	struct net_device		*netdev[MTK_MAX_DEVS];
	struct mtk_mac			*mac[MTK_MAX_DEVS];
	int				irq[3];
	u32				msg_enable;
	unsigned long			sysclk;
	struct regmap			*ethsys;
	struct regmap			*infra;
	struct phylink_pcs		*sgmii_pcs[MTK_MAX_DEVS];
	struct regmap			*pctl;
	bool				hwlro;
	refcount_t			dma_refcnt;
	struct mtk_tx_ring		tx_ring;
	struct mtk_rx_ring		rx_ring[MTK_MAX_RX_RING_NUM];
	struct mtk_rx_ring		rx_ring_qdma;
	struct napi_struct		tx_napi;
	struct napi_struct		rx_napi;
	void				*scratch_ring;
	dma_addr_t			phy_scratch_ring;
	void				*scratch_head;
	struct clk			*clks[MTK_CLK_MAX];

	struct mii_bus			*mii_bus;
	struct work_struct		pending_work;
	unsigned long			state;

	const struct mtk_soc_data	*soc;

	spinlock_t			dim_lock;

	u32				rx_events;
	u32				rx_packets;
	u32				rx_bytes;
	struct dim			rx_dim;

	u32				tx_events;
	u32				tx_packets;
	u32				tx_bytes;
	struct dim			tx_dim;

	int				ip_align;

	struct metadata_dst		*dsa_meta[MTK_MAX_DSA_PORTS];

	struct mtk_ppe			*ppe[2];
	struct rhashtable		flow_table;

	struct bpf_prog			__rcu *prog;

	/* DMA-hang monitoring state used by the reset worker */
	struct {
		struct delayed_work monitor_work;
		u32 wdidx;
		u8 wdma_hang_count;
		u8 qdma_hang_count;
		u8 adma_hang_count;
	} reset;
};

/* struct mtk_mac -	the structure that holds the info about the MACs of the
 *			SoC
 * @id:			The number of the MAC
 * @interface:		Interface mode kept for detecting change in hw settings
 * @speed:		last programmed link speed — TODO confirm vs mac_link_up
 * @of_node:		Our devicetree node
 * @phylink:		phylink instance for this MAC
 * @phylink_config:	phylink configuration for this MAC
 * @hw:			Backpointer to our main data structure
 * @hw_stats:		Packet statistics counter
 * @hwlro_ip:		IPv4 addresses used for HW LRO flows
 * @hwlro_ip_cnt:	number of valid entries in @hwlro_ip
 * @syscfg0:		cached SYSCFG0 value — presumably; verify against users
 * @device_notifier:	netdevice event notifier block
 */
struct mtk_mac {
	int				id;
	phy_interface_t			interface;
	int				speed;
	struct device_node		*of_node;
	struct phylink			*phylink;
	struct phylink_config		phylink_config;
	struct mtk_eth			*hw;
	struct mtk_hw_stats		*hw_stats;
	__be32				hwlro_ip[MTK_MAX_LRO_IP_CNT];
	int				hwlro_ip_cnt;
	unsigned int			syscfg0;
	struct notifier_block		device_notifier;
};

/* the struct describing the SoC.
these are declared in the soc_xyz.c files */ 1295 extern const struct of_device_id of_mtk_match[]; 1296 1297 static inline bool mtk_is_netsys_v1(struct mtk_eth *eth) 1298 { 1299 return eth->soc->version == 1; 1300 } 1301 1302 static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth) 1303 { 1304 return eth->soc->version > 1; 1305 } 1306 1307 static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth) 1308 { 1309 return eth->soc->version > 2; 1310 } 1311 1312 static inline struct mtk_foe_entry * 1313 mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash) 1314 { 1315 const struct mtk_soc_data *soc = ppe->eth->soc; 1316 1317 return ppe->foe_table + hash * soc->foe_entry_size; 1318 } 1319 1320 static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth) 1321 { 1322 if (mtk_is_netsys_v2_or_greater(eth)) 1323 return MTK_FOE_IB1_BIND_TIMESTAMP_V2; 1324 1325 return MTK_FOE_IB1_BIND_TIMESTAMP; 1326 } 1327 1328 static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth) 1329 { 1330 if (mtk_is_netsys_v2_or_greater(eth)) 1331 return MTK_FOE_IB1_BIND_PPPOE_V2; 1332 1333 return MTK_FOE_IB1_BIND_PPPOE; 1334 } 1335 1336 static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth) 1337 { 1338 if (mtk_is_netsys_v2_or_greater(eth)) 1339 return MTK_FOE_IB1_BIND_VLAN_TAG_V2; 1340 1341 return MTK_FOE_IB1_BIND_VLAN_TAG; 1342 } 1343 1344 static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth) 1345 { 1346 if (mtk_is_netsys_v2_or_greater(eth)) 1347 return MTK_FOE_IB1_BIND_VLAN_LAYER_V2; 1348 1349 return MTK_FOE_IB1_BIND_VLAN_LAYER; 1350 } 1351 1352 static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val) 1353 { 1354 if (mtk_is_netsys_v2_or_greater(eth)) 1355 return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); 1356 1357 return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val); 1358 } 1359 1360 static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val) 1361 { 1362 if (mtk_is_netsys_v2_or_greater(eth)) 1363 return 
FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); 1364 1365 return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val); 1366 } 1367 1368 static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth) 1369 { 1370 if (mtk_is_netsys_v2_or_greater(eth)) 1371 return MTK_FOE_IB1_PACKET_TYPE_V2; 1372 1373 return MTK_FOE_IB1_PACKET_TYPE; 1374 } 1375 1376 static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val) 1377 { 1378 if (mtk_is_netsys_v2_or_greater(eth)) 1379 return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val); 1380 1381 return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val); 1382 } 1383 1384 static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth) 1385 { 1386 if (mtk_is_netsys_v2_or_greater(eth)) 1387 return MTK_FOE_IB2_MULTICAST_V2; 1388 1389 return MTK_FOE_IB2_MULTICAST; 1390 } 1391 1392 /* read the hardware status register */ 1393 void mtk_stats_update_mac(struct mtk_mac *mac); 1394 1395 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); 1396 u32 mtk_r32(struct mtk_eth *eth, unsigned reg); 1397 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg); 1398 1399 int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); 1400 int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); 1401 int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); 1402 1403 int mtk_eth_offload_init(struct mtk_eth *eth); 1404 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, 1405 void *type_data); 1406 int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls, 1407 int ppe_index); 1408 void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list); 1409 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); 1410 1411 1412 #endif /* MTK_ETH_H */ 1413