1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2020 MediaTek Corporation 4 * Copyright (c) 2020 BayLibre SAS 5 * 6 * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com> 7 */ 8 9 #include <linux/bits.h> 10 #include <linux/clk.h> 11 #include <linux/compiler.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/etherdevice.h> 14 #include <linux/kernel.h> 15 #include <linux/mfd/syscon.h> 16 #include <linux/mii.h> 17 #include <linux/module.h> 18 #include <linux/netdevice.h> 19 #include <linux/of.h> 20 #include <linux/of_device.h> 21 #include <linux/of_mdio.h> 22 #include <linux/of_net.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm.h> 25 #include <linux/regmap.h> 26 #include <linux/skbuff.h> 27 #include <linux/spinlock.h> 28 29 #define MTK_STAR_DRVNAME "mtk_star_emac" 30 31 #define MTK_STAR_WAIT_TIMEOUT 300 32 #define MTK_STAR_MAX_FRAME_SIZE 1514 33 #define MTK_STAR_SKB_ALIGNMENT 16 34 #define MTK_STAR_HASHTABLE_MC_LIMIT 256 35 #define MTK_STAR_HASHTABLE_SIZE_MAX 512 36 #define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4) 37 38 /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't 39 * work for this controller. 
40 */ 41 #define MTK_STAR_IP_ALIGN 2 42 43 static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" }; 44 #define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names) 45 46 /* PHY Control Register 0 */ 47 #define MTK_STAR_REG_PHY_CTRL0 0x0000 48 #define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13) 49 #define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14) 50 #define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15) 51 #define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8) 52 #define MTK_STAR_OFF_PHY_CTRL0_PREG 8 53 #define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16) 54 #define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16 55 56 /* PHY Control Register 1 */ 57 #define MTK_STAR_REG_PHY_CTRL1 0x0004 58 #define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0) 59 #define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8) 60 #define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9 61 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00 62 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01 63 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02 64 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11) 65 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12) 66 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13) 67 68 /* MAC Configuration Register */ 69 #define MTK_STAR_REG_MAC_CFG 0x0008 70 #define MTK_STAR_OFF_MAC_CFG_IPG 10 71 #define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0) 72 #define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16) 73 #define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19) 74 #define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20) 75 #define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22) 76 #define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31) 77 78 /* Flow-Control Configuration Register */ 79 #define MTK_STAR_REG_FC_CFG 0x000c 80 #define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7) 81 #define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8) 82 #define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16 83 #define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16) 84 #define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800 85 86 /* ARL Configuration Register */ 87 #define MTK_STAR_REG_ARL_CFG 0x0010 88 #define 
MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0) 89 #define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4) 90 91 /* MAC High and Low Bytes Registers */ 92 #define MTK_STAR_REG_MY_MAC_H 0x0014 93 #define MTK_STAR_REG_MY_MAC_L 0x0018 94 95 /* Hash Table Control Register */ 96 #define MTK_STAR_REG_HASH_CTRL 0x001c 97 #define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0) 98 #define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12) 99 #define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13) 100 #define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14) 101 #define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16) 102 #define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17) 103 #define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31) 104 105 /* TX DMA Control Register */ 106 #define MTK_STAR_REG_TX_DMA_CTRL 0x0034 107 #define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0) 108 #define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1) 109 #define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2) 110 111 /* RX DMA Control Register */ 112 #define MTK_STAR_REG_RX_DMA_CTRL 0x0038 113 #define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0) 114 #define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1) 115 #define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2) 116 117 /* DMA Address Registers */ 118 #define MTK_STAR_REG_TX_DPTR 0x003c 119 #define MTK_STAR_REG_RX_DPTR 0x0040 120 #define MTK_STAR_REG_TX_BASE_ADDR 0x0044 121 #define MTK_STAR_REG_RX_BASE_ADDR 0x0048 122 123 /* Interrupt Status Register */ 124 #define MTK_STAR_REG_INT_STS 0x0050 125 #define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2) 126 #define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3) 127 #define MTK_STAR_BIT_INT_STS_FNRC BIT(6) 128 #define MTK_STAR_BIT_INT_STS_TNTC BIT(8) 129 130 /* Interrupt Mask Register */ 131 #define MTK_STAR_REG_INT_MASK 0x0054 132 #define MTK_STAR_BIT_INT_MASK_FNRC BIT(6) 133 134 /* Delay-Macro Register */ 135 #define MTK_STAR_REG_TEST0 0x0058 136 #define MTK_STAR_BIT_INV_RX_CLK BIT(30) 137 #define MTK_STAR_BIT_INV_TX_CLK BIT(31) 138 139 /* Misc. 
Config Register */ 140 #define MTK_STAR_REG_TEST1 0x005c 141 #define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31) 142 143 /* Extended Configuration Register */ 144 #define MTK_STAR_REG_EXT_CFG 0x0060 145 #define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16 146 #define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16) 147 #define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400 148 149 /* EthSys Configuration Register */ 150 #define MTK_STAR_REG_SYS_CONF 0x0094 151 #define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0) 152 #define MTK_STAR_BIT_EXT_MDC_MODE BIT(1) 153 #define MTK_STAR_BIT_SWC_MII_MODE BIT(2) 154 155 /* MAC Clock Configuration Register */ 156 #define MTK_STAR_REG_MAC_CLK_CONF 0x00ac 157 #define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0) 158 #define MTK_STAR_BIT_CLK_DIV_10 0x0a 159 #define MTK_STAR_BIT_CLK_DIV_50 0x32 160 161 /* Counter registers. */ 162 #define MTK_STAR_REG_C_RXOKPKT 0x0100 163 #define MTK_STAR_REG_C_RXOKBYTE 0x0104 164 #define MTK_STAR_REG_C_RXRUNT 0x0108 165 #define MTK_STAR_REG_C_RXLONG 0x010c 166 #define MTK_STAR_REG_C_RXDROP 0x0110 167 #define MTK_STAR_REG_C_RXCRC 0x0114 168 #define MTK_STAR_REG_C_RXARLDROP 0x0118 169 #define MTK_STAR_REG_C_RXVLANDROP 0x011c 170 #define MTK_STAR_REG_C_RXCSERR 0x0120 171 #define MTK_STAR_REG_C_RXPAUSE 0x0124 172 #define MTK_STAR_REG_C_TXOKPKT 0x0128 173 #define MTK_STAR_REG_C_TXOKBYTE 0x012c 174 #define MTK_STAR_REG_C_TXPAUSECOL 0x0130 175 #define MTK_STAR_REG_C_TXRTY 0x0134 176 #define MTK_STAR_REG_C_TXSKIP 0x0138 177 #define MTK_STAR_REG_C_TX_ARP 0x013c 178 #define MTK_STAR_REG_C_RX_RERR 0x01d8 179 #define MTK_STAR_REG_C_RX_UNI 0x01dc 180 #define MTK_STAR_REG_C_RX_MULTI 0x01e0 181 #define MTK_STAR_REG_C_RX_BROAD 0x01e4 182 #define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8 183 #define MTK_STAR_REG_C_TX_UNI 0x01ec 184 #define MTK_STAR_REG_C_TX_MULTI 0x01f0 185 #define MTK_STAR_REG_C_TX_BROAD 0x01f4 186 #define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8 187 #define MTK_STAR_REG_C_TX_LATECOL 0x01fc 188 #define MTK_STAR_REG_C_RX_LENGTHERR 
0x0214 189 #define MTK_STAR_REG_C_RX_TWIST 0x0218 190 191 /* Ethernet CFG Control */ 192 #define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4 193 #define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8 194 #define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10 195 #define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0) 196 #define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0 197 #define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1 198 #define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0) 199 #define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8) 200 201 /* Represents the actual structure of descriptors used by the MAC. We can 202 * reuse the same structure for both TX and RX - the layout is the same, only 203 * the flags differ slightly. 204 */ 205 struct mtk_star_ring_desc { 206 /* Contains both the status flags as well as packet length. */ 207 u32 status; 208 u32 data_ptr; 209 u32 vtag; 210 u32 reserved; 211 }; 212 213 #define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0) 214 #define MTK_STAR_DESC_BIT_RX_CRCE BIT(24) 215 #define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25) 216 #define MTK_STAR_DESC_BIT_INT BIT(27) 217 #define MTK_STAR_DESC_BIT_LS BIT(28) 218 #define MTK_STAR_DESC_BIT_FS BIT(29) 219 #define MTK_STAR_DESC_BIT_EOR BIT(30) 220 #define MTK_STAR_DESC_BIT_COWN BIT(31) 221 222 /* Helper structure for storing data read from/written to descriptors in order 223 * to limit reads from/writes to DMA memory. 
224 */ 225 struct mtk_star_ring_desc_data { 226 unsigned int len; 227 unsigned int flags; 228 dma_addr_t dma_addr; 229 struct sk_buff *skb; 230 }; 231 232 #define MTK_STAR_RING_NUM_DESCS 512 233 #define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4) 234 #define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS 235 #define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS 236 #define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2) 237 #define MTK_STAR_DMA_SIZE \ 238 (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc)) 239 240 struct mtk_star_ring { 241 struct mtk_star_ring_desc *descs; 242 struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS]; 243 dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS]; 244 unsigned int head; 245 unsigned int tail; 246 }; 247 248 struct mtk_star_compat { 249 int (*set_interface_mode)(struct net_device *ndev); 250 unsigned char bit_clk_div; 251 }; 252 253 struct mtk_star_priv { 254 struct net_device *ndev; 255 256 struct regmap *regs; 257 struct regmap *pericfg; 258 259 struct clk_bulk_data clks[MTK_STAR_NCLKS]; 260 261 void *ring_base; 262 struct mtk_star_ring_desc *descs_base; 263 dma_addr_t dma_addr; 264 struct mtk_star_ring tx_ring; 265 struct mtk_star_ring rx_ring; 266 267 struct mii_bus *mii; 268 struct napi_struct tx_napi; 269 struct napi_struct rx_napi; 270 271 struct device_node *phy_node; 272 phy_interface_t phy_intf; 273 struct phy_device *phydev; 274 unsigned int link; 275 int speed; 276 int duplex; 277 int pause; 278 bool rmii_rxc; 279 bool rx_inv; 280 bool tx_inv; 281 282 const struct mtk_star_compat *compat_data; 283 284 /* Protects against concurrent descriptor access. 
*/ 285 spinlock_t lock; 286 287 struct rtnl_link_stats64 stats; 288 }; 289 290 static struct device *mtk_star_get_dev(struct mtk_star_priv *priv) 291 { 292 return priv->ndev->dev.parent; 293 } 294 295 static const struct regmap_config mtk_star_regmap_config = { 296 .reg_bits = 32, 297 .val_bits = 32, 298 .reg_stride = 4, 299 .disable_locking = true, 300 }; 301 302 static void mtk_star_ring_init(struct mtk_star_ring *ring, 303 struct mtk_star_ring_desc *descs) 304 { 305 memset(ring, 0, sizeof(*ring)); 306 ring->descs = descs; 307 ring->head = 0; 308 ring->tail = 0; 309 } 310 311 static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring, 312 struct mtk_star_ring_desc_data *desc_data) 313 { 314 struct mtk_star_ring_desc *desc = &ring->descs[ring->tail]; 315 unsigned int status; 316 317 status = READ_ONCE(desc->status); 318 dma_rmb(); /* Make sure we read the status bits before checking it. */ 319 320 if (!(status & MTK_STAR_DESC_BIT_COWN)) 321 return -1; 322 323 desc_data->len = status & MTK_STAR_DESC_MSK_LEN; 324 desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN; 325 desc_data->dma_addr = ring->dma_addrs[ring->tail]; 326 desc_data->skb = ring->skbs[ring->tail]; 327 328 ring->dma_addrs[ring->tail] = 0; 329 ring->skbs[ring->tail] = NULL; 330 331 status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR; 332 333 WRITE_ONCE(desc->data_ptr, 0); 334 WRITE_ONCE(desc->status, status); 335 336 ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS; 337 338 return 0; 339 } 340 341 static void mtk_star_ring_push_head(struct mtk_star_ring *ring, 342 struct mtk_star_ring_desc_data *desc_data, 343 unsigned int flags) 344 { 345 struct mtk_star_ring_desc *desc = &ring->descs[ring->head]; 346 unsigned int status; 347 348 status = READ_ONCE(desc->status); 349 350 ring->skbs[ring->head] = desc_data->skb; 351 ring->dma_addrs[ring->head] = desc_data->dma_addr; 352 353 status |= desc_data->len; 354 if (flags) 355 status |= flags; 356 357 WRITE_ONCE(desc->data_ptr, 
desc_data->dma_addr); 358 WRITE_ONCE(desc->status, status); 359 status &= ~MTK_STAR_DESC_BIT_COWN; 360 /* Flush previous modifications before ownership change. */ 361 dma_wmb(); 362 WRITE_ONCE(desc->status, status); 363 364 ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS; 365 } 366 367 static void 368 mtk_star_ring_push_head_rx(struct mtk_star_ring *ring, 369 struct mtk_star_ring_desc_data *desc_data) 370 { 371 mtk_star_ring_push_head(ring, desc_data, 0); 372 } 373 374 static void 375 mtk_star_ring_push_head_tx(struct mtk_star_ring *ring, 376 struct mtk_star_ring_desc_data *desc_data) 377 { 378 static const unsigned int flags = MTK_STAR_DESC_BIT_FS | 379 MTK_STAR_DESC_BIT_LS | 380 MTK_STAR_DESC_BIT_INT; 381 382 mtk_star_ring_push_head(ring, desc_data, flags); 383 } 384 385 static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring) 386 { 387 u32 avail; 388 389 if (ring->tail > ring->head) 390 avail = ring->tail - ring->head - 1; 391 else 392 avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1; 393 394 return avail; 395 } 396 397 static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv, 398 struct sk_buff *skb) 399 { 400 struct device *dev = mtk_star_get_dev(priv); 401 402 /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. 
*/ 403 return dma_map_single(dev, skb_tail_pointer(skb) - 2, 404 skb_tailroom(skb), DMA_FROM_DEVICE); 405 } 406 407 static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv, 408 struct mtk_star_ring_desc_data *desc_data) 409 { 410 struct device *dev = mtk_star_get_dev(priv); 411 412 dma_unmap_single(dev, desc_data->dma_addr, 413 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE); 414 } 415 416 static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv, 417 struct sk_buff *skb) 418 { 419 struct device *dev = mtk_star_get_dev(priv); 420 421 return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 422 } 423 424 static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv, 425 struct mtk_star_ring_desc_data *desc_data) 426 { 427 struct device *dev = mtk_star_get_dev(priv); 428 429 return dma_unmap_single(dev, desc_data->dma_addr, 430 skb_headlen(desc_data->skb), DMA_TO_DEVICE); 431 } 432 433 static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv) 434 { 435 regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG, 436 MTK_STAR_BIT_MAC_CFG_NIC_PD); 437 } 438 439 static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv, 440 bool rx, bool tx) 441 { 442 u32 value; 443 444 regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value); 445 446 if (tx) 447 value &= ~MTK_STAR_BIT_INT_STS_TNTC; 448 if (rx) 449 value &= ~MTK_STAR_BIT_INT_STS_FNRC; 450 451 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value); 452 } 453 454 static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv, 455 bool rx, bool tx) 456 { 457 u32 value; 458 459 regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value); 460 461 if (tx) 462 value |= MTK_STAR_BIT_INT_STS_TNTC; 463 if (rx) 464 value |= MTK_STAR_BIT_INT_STS_FNRC; 465 466 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value); 467 } 468 469 /* Unmask the three interrupts we care about, mask all others. 
*/ 470 static void mtk_star_intr_enable(struct mtk_star_priv *priv) 471 { 472 unsigned int val = MTK_STAR_BIT_INT_STS_TNTC | 473 MTK_STAR_BIT_INT_STS_FNRC | 474 MTK_STAR_REG_INT_STS_MIB_CNT_TH; 475 476 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val); 477 } 478 479 static void mtk_star_intr_disable(struct mtk_star_priv *priv) 480 { 481 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0); 482 } 483 484 static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv) 485 { 486 unsigned int val; 487 488 regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val); 489 regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val); 490 491 return val; 492 } 493 494 static void mtk_star_dma_init(struct mtk_star_priv *priv) 495 { 496 struct mtk_star_ring_desc *desc; 497 unsigned int val; 498 int i; 499 500 priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base; 501 502 for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) { 503 desc = &priv->descs_base[i]; 504 505 memset(desc, 0, sizeof(*desc)); 506 desc->status = MTK_STAR_DESC_BIT_COWN; 507 if ((i == MTK_STAR_NUM_TX_DESCS - 1) || 508 (i == MTK_STAR_NUM_DESCS_TOTAL - 1)) 509 desc->status |= MTK_STAR_DESC_BIT_EOR; 510 } 511 512 mtk_star_ring_init(&priv->tx_ring, priv->descs_base); 513 mtk_star_ring_init(&priv->rx_ring, 514 priv->descs_base + MTK_STAR_NUM_TX_DESCS); 515 516 /* Set DMA pointers. 
*/ 517 val = (unsigned int)priv->dma_addr; 518 regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val); 519 regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val); 520 521 val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS; 522 regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val); 523 regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val); 524 } 525 526 static void mtk_star_dma_start(struct mtk_star_priv *priv) 527 { 528 regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, 529 MTK_STAR_BIT_TX_DMA_CTRL_START); 530 regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, 531 MTK_STAR_BIT_RX_DMA_CTRL_START); 532 } 533 534 static void mtk_star_dma_stop(struct mtk_star_priv *priv) 535 { 536 regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, 537 MTK_STAR_BIT_TX_DMA_CTRL_STOP); 538 regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, 539 MTK_STAR_BIT_RX_DMA_CTRL_STOP); 540 } 541 542 static void mtk_star_dma_disable(struct mtk_star_priv *priv) 543 { 544 int i; 545 546 mtk_star_dma_stop(priv); 547 548 /* Take back all descriptors. 
*/ 549 for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) 550 priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN; 551 } 552 553 static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv) 554 { 555 regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, 556 MTK_STAR_BIT_RX_DMA_CTRL_RESUME); 557 } 558 559 static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv) 560 { 561 regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, 562 MTK_STAR_BIT_TX_DMA_CTRL_RESUME); 563 } 564 565 static void mtk_star_set_mac_addr(struct net_device *ndev) 566 { 567 struct mtk_star_priv *priv = netdev_priv(ndev); 568 const u8 *mac_addr = ndev->dev_addr; 569 unsigned int high, low; 570 571 high = mac_addr[0] << 8 | mac_addr[1] << 0; 572 low = mac_addr[2] << 24 | mac_addr[3] << 16 | 573 mac_addr[4] << 8 | mac_addr[5]; 574 575 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high); 576 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low); 577 } 578 579 static void mtk_star_reset_counters(struct mtk_star_priv *priv) 580 { 581 static const unsigned int counter_regs[] = { 582 MTK_STAR_REG_C_RXOKPKT, 583 MTK_STAR_REG_C_RXOKBYTE, 584 MTK_STAR_REG_C_RXRUNT, 585 MTK_STAR_REG_C_RXLONG, 586 MTK_STAR_REG_C_RXDROP, 587 MTK_STAR_REG_C_RXCRC, 588 MTK_STAR_REG_C_RXARLDROP, 589 MTK_STAR_REG_C_RXVLANDROP, 590 MTK_STAR_REG_C_RXCSERR, 591 MTK_STAR_REG_C_RXPAUSE, 592 MTK_STAR_REG_C_TXOKPKT, 593 MTK_STAR_REG_C_TXOKBYTE, 594 MTK_STAR_REG_C_TXPAUSECOL, 595 MTK_STAR_REG_C_TXRTY, 596 MTK_STAR_REG_C_TXSKIP, 597 MTK_STAR_REG_C_TX_ARP, 598 MTK_STAR_REG_C_RX_RERR, 599 MTK_STAR_REG_C_RX_UNI, 600 MTK_STAR_REG_C_RX_MULTI, 601 MTK_STAR_REG_C_RX_BROAD, 602 MTK_STAR_REG_C_RX_ALIGNERR, 603 MTK_STAR_REG_C_TX_UNI, 604 MTK_STAR_REG_C_TX_MULTI, 605 MTK_STAR_REG_C_TX_BROAD, 606 MTK_STAR_REG_C_TX_TIMEOUT, 607 MTK_STAR_REG_C_TX_LATECOL, 608 MTK_STAR_REG_C_RX_LENGTHERR, 609 MTK_STAR_REG_C_RX_TWIST, 610 }; 611 612 unsigned int i, val; 613 614 for (i = 0; i < ARRAY_SIZE(counter_regs); i++) 615 regmap_read(priv->regs, 
counter_regs[i], &val); 616 } 617 618 static void mtk_star_update_stat(struct mtk_star_priv *priv, 619 unsigned int reg, u64 *stat) 620 { 621 unsigned int val; 622 623 regmap_read(priv->regs, reg, &val); 624 *stat += val; 625 } 626 627 /* Try to get as many stats as possible from the internal registers instead 628 * of tracking them ourselves. 629 */ 630 static void mtk_star_update_stats(struct mtk_star_priv *priv) 631 { 632 struct rtnl_link_stats64 *stats = &priv->stats; 633 634 /* OK packets and bytes. */ 635 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets); 636 mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets); 637 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes); 638 mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes); 639 640 /* RX & TX multicast. */ 641 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast); 642 mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast); 643 644 /* Collisions. */ 645 mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL, 646 &stats->collisions); 647 mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL, 648 &stats->collisions); 649 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions); 650 651 /* RX Errors. */ 652 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR, 653 &stats->rx_length_errors); 654 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG, 655 &stats->rx_over_errors); 656 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors); 657 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR, 658 &stats->rx_frame_errors); 659 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP, 660 &stats->rx_fifo_errors); 661 /* Sum of the general RX error counter + all of the above. 
*/ 662 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors); 663 stats->rx_errors += stats->rx_length_errors; 664 stats->rx_errors += stats->rx_over_errors; 665 stats->rx_errors += stats->rx_crc_errors; 666 stats->rx_errors += stats->rx_frame_errors; 667 stats->rx_errors += stats->rx_fifo_errors; 668 } 669 670 static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev) 671 { 672 uintptr_t tail, offset; 673 struct sk_buff *skb; 674 675 skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE); 676 if (!skb) 677 return NULL; 678 679 /* Align to 16 bytes. */ 680 tail = (uintptr_t)skb_tail_pointer(skb); 681 if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) { 682 offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1); 683 skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset); 684 } 685 686 /* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will 687 * extract the Ethernet header (14 bytes) so we need two more bytes. 688 */ 689 skb_reserve(skb, MTK_STAR_IP_ALIGN); 690 691 return skb; 692 } 693 694 static int mtk_star_prepare_rx_skbs(struct net_device *ndev) 695 { 696 struct mtk_star_priv *priv = netdev_priv(ndev); 697 struct mtk_star_ring *ring = &priv->rx_ring; 698 struct device *dev = mtk_star_get_dev(priv); 699 struct mtk_star_ring_desc *desc; 700 struct sk_buff *skb; 701 dma_addr_t dma_addr; 702 int i; 703 704 for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) { 705 skb = mtk_star_alloc_skb(ndev); 706 if (!skb) 707 return -ENOMEM; 708 709 dma_addr = mtk_star_dma_map_rx(priv, skb); 710 if (dma_mapping_error(dev, dma_addr)) { 711 dev_kfree_skb(skb); 712 return -ENOMEM; 713 } 714 715 desc = &ring->descs[i]; 716 desc->data_ptr = dma_addr; 717 desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN; 718 desc->status &= ~MTK_STAR_DESC_BIT_COWN; 719 ring->skbs[i] = skb; 720 ring->dma_addrs[i] = dma_addr; 721 } 722 723 return 0; 724 } 725 726 static void 727 mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring, 728 void (*unmap_func)(struct 
mtk_star_priv *, 729 struct mtk_star_ring_desc_data *)) 730 { 731 struct mtk_star_ring_desc_data desc_data; 732 int i; 733 734 for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) { 735 if (!ring->dma_addrs[i]) 736 continue; 737 738 desc_data.dma_addr = ring->dma_addrs[i]; 739 desc_data.skb = ring->skbs[i]; 740 741 unmap_func(priv, &desc_data); 742 dev_kfree_skb(desc_data.skb); 743 } 744 } 745 746 static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv) 747 { 748 struct mtk_star_ring *ring = &priv->rx_ring; 749 750 mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx); 751 } 752 753 static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv) 754 { 755 struct mtk_star_ring *ring = &priv->tx_ring; 756 757 mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx); 758 } 759 760 /** 761 * mtk_star_handle_irq - Interrupt Handler. 762 * @irq: interrupt number. 763 * @data: pointer to a network interface device structure. 764 * Description : this is the driver interrupt service routine. 765 * it mainly handles: 766 * 1. tx complete interrupt for frame transmission. 767 * 2. rx complete interrupt for frame reception. 768 * 3. MAC Management Counter interrupt to avoid counter overflow. 
769 **/ 770 static irqreturn_t mtk_star_handle_irq(int irq, void *data) 771 { 772 struct net_device *ndev = data; 773 struct mtk_star_priv *priv = netdev_priv(ndev); 774 unsigned int intr_status = mtk_star_intr_ack_all(priv); 775 bool rx, tx; 776 777 rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) && 778 napi_schedule_prep(&priv->rx_napi); 779 tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) && 780 napi_schedule_prep(&priv->tx_napi); 781 782 if (rx || tx) { 783 spin_lock(&priv->lock); 784 /* mask Rx and TX Complete interrupt */ 785 mtk_star_disable_dma_irq(priv, rx, tx); 786 spin_unlock(&priv->lock); 787 788 if (rx) 789 __napi_schedule(&priv->rx_napi); 790 if (tx) 791 __napi_schedule(&priv->tx_napi); 792 } 793 794 /* interrupt is triggered once any counters reach 0x8000000 */ 795 if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) { 796 mtk_star_update_stats(priv); 797 mtk_star_reset_counters(priv); 798 } 799 800 return IRQ_HANDLED; 801 } 802 803 /* Wait for the completion of any previous command - CMD_START bit must be 804 * cleared by hardware. 805 */ 806 static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv) 807 { 808 unsigned int val; 809 810 return regmap_read_poll_timeout_atomic(priv->regs, 811 MTK_STAR_REG_HASH_CTRL, val, 812 !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START), 813 10, MTK_STAR_WAIT_TIMEOUT); 814 } 815 816 static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv) 817 { 818 unsigned int val; 819 int ret; 820 821 /* Wait for BIST_DONE bit. */ 822 ret = regmap_read_poll_timeout_atomic(priv->regs, 823 MTK_STAR_REG_HASH_CTRL, val, 824 val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE, 825 10, MTK_STAR_WAIT_TIMEOUT); 826 if (ret) 827 return ret; 828 829 /* Check the BIST_OK bit. 
*/ 830 if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL, 831 MTK_STAR_BIT_HASH_CTRL_BIST_OK)) 832 return -EIO; 833 834 return 0; 835 } 836 837 static int mtk_star_set_hashbit(struct mtk_star_priv *priv, 838 unsigned int hash_addr) 839 { 840 unsigned int val; 841 int ret; 842 843 ret = mtk_star_hash_wait_cmd_start(priv); 844 if (ret) 845 return ret; 846 847 val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR; 848 val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD; 849 val |= MTK_STAR_BIT_HASH_CTRL_CMD_START; 850 val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN; 851 val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA; 852 regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val); 853 854 return mtk_star_hash_wait_ok(priv); 855 } 856 857 static int mtk_star_reset_hash_table(struct mtk_star_priv *priv) 858 { 859 int ret; 860 861 ret = mtk_star_hash_wait_cmd_start(priv); 862 if (ret) 863 return ret; 864 865 regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL, 866 MTK_STAR_BIT_HASH_CTRL_BIST_EN); 867 regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1, 868 MTK_STAR_BIT_TEST1_RST_HASH_MBIST); 869 870 return mtk_star_hash_wait_ok(priv); 871 } 872 873 static void mtk_star_phy_config(struct mtk_star_priv *priv) 874 { 875 unsigned int val; 876 877 if (priv->speed == SPEED_1000) 878 val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M; 879 else if (priv->speed == SPEED_100) 880 val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M; 881 else 882 val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M; 883 val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD; 884 885 val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN; 886 if (priv->pause) { 887 val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX; 888 val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX; 889 val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX; 890 } else { 891 val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX; 892 val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX; 893 val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX; 894 } 895 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val); 896 897 val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K; 898 val <<= 
MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH; 899 val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR; 900 regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG, 901 MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH | 902 MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val); 903 904 val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K; 905 val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS; 906 regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG, 907 MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val); 908 } 909 910 static void mtk_star_adjust_link(struct net_device *ndev) 911 { 912 struct mtk_star_priv *priv = netdev_priv(ndev); 913 struct phy_device *phydev = priv->phydev; 914 bool new_state = false; 915 916 if (phydev->link) { 917 if (!priv->link) { 918 priv->link = phydev->link; 919 new_state = true; 920 } 921 922 if (priv->speed != phydev->speed) { 923 priv->speed = phydev->speed; 924 new_state = true; 925 } 926 927 if (priv->pause != phydev->pause) { 928 priv->pause = phydev->pause; 929 new_state = true; 930 } 931 } else { 932 if (priv->link) { 933 priv->link = phydev->link; 934 new_state = true; 935 } 936 } 937 938 if (new_state) { 939 if (phydev->link) 940 mtk_star_phy_config(priv); 941 942 phy_print_status(ndev->phydev); 943 } 944 } 945 946 static void mtk_star_init_config(struct mtk_star_priv *priv) 947 { 948 unsigned int val; 949 950 val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE | 951 MTK_STAR_BIT_EXT_MDC_MODE | 952 MTK_STAR_BIT_SWC_MII_MODE); 953 954 regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val); 955 regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF, 956 MTK_STAR_MSK_MAC_CLK_CONF, 957 priv->compat_data->bit_clk_div); 958 } 959 960 static int mtk_star_enable(struct net_device *ndev) 961 { 962 struct mtk_star_priv *priv = netdev_priv(ndev); 963 unsigned int val; 964 int ret; 965 966 mtk_star_nic_disable_pd(priv); 967 mtk_star_intr_disable(priv); 968 mtk_star_dma_stop(priv); 969 970 mtk_star_set_mac_addr(ndev); 971 972 /* Configure the MAC */ 973 val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT; 974 val <<= MTK_STAR_OFF_MAC_CFG_IPG; 975 val |= 
MTK_STAR_BIT_MAC_CFG_MAXLEN_1522; 976 val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD; 977 val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP; 978 regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val); 979 980 /* Enable Hash Table BIST and reset it */ 981 ret = mtk_star_reset_hash_table(priv); 982 if (ret) 983 return ret; 984 985 /* Setup the hashing algorithm */ 986 regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG, 987 MTK_STAR_BIT_ARL_CFG_HASH_ALG | 988 MTK_STAR_BIT_ARL_CFG_MISC_MODE); 989 990 /* Don't strip VLAN tags */ 991 regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG, 992 MTK_STAR_BIT_MAC_CFG_VLAN_STRIP); 993 994 /* Setup DMA */ 995 mtk_star_dma_init(priv); 996 997 ret = mtk_star_prepare_rx_skbs(ndev); 998 if (ret) 999 goto err_out; 1000 1001 /* Request the interrupt */ 1002 ret = request_irq(ndev->irq, mtk_star_handle_irq, 1003 IRQF_TRIGGER_NONE, ndev->name, ndev); 1004 if (ret) 1005 goto err_free_skbs; 1006 1007 napi_enable(&priv->tx_napi); 1008 napi_enable(&priv->rx_napi); 1009 1010 mtk_star_intr_ack_all(priv); 1011 mtk_star_intr_enable(priv); 1012 1013 /* Connect to and start PHY */ 1014 priv->phydev = of_phy_connect(ndev, priv->phy_node, 1015 mtk_star_adjust_link, 0, priv->phy_intf); 1016 if (!priv->phydev) { 1017 netdev_err(ndev, "failed to connect to PHY\n"); 1018 ret = -ENODEV; 1019 goto err_free_irq; 1020 } 1021 1022 mtk_star_dma_start(priv); 1023 phy_start(priv->phydev); 1024 netif_start_queue(ndev); 1025 1026 return 0; 1027 1028 err_free_irq: 1029 free_irq(ndev->irq, ndev); 1030 err_free_skbs: 1031 mtk_star_free_rx_skbs(priv); 1032 err_out: 1033 return ret; 1034 } 1035 1036 static void mtk_star_disable(struct net_device *ndev) 1037 { 1038 struct mtk_star_priv *priv = netdev_priv(ndev); 1039 1040 netif_stop_queue(ndev); 1041 napi_disable(&priv->tx_napi); 1042 napi_disable(&priv->rx_napi); 1043 mtk_star_intr_disable(priv); 1044 mtk_star_dma_disable(priv); 1045 mtk_star_intr_ack_all(priv); 1046 phy_stop(priv->phydev); 1047 phy_disconnect(priv->phydev); 1048 
free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}

/* ndo_open: delegate to mtk_star_enable(). */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}

/* ndo_stop: delegate to mtk_star_disable(); always succeeds. */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);

	return 0;
}

/* ndo_eth_ioctl: forward MII ioctls to the PHY, only while the device
 * is running.
 */
static int mtk_star_netdev_ioctl(struct net_device *ndev,
				 struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	return phy_mii_ioctl(ndev->phydev, req, cmd);
}

/* Slow path of mtk_star_maybe_stop_tx(): stop the queue, then re-check ring
 * occupancy after the barrier and restart the queue if space freed up in
 * the meantime.  Returns -EBUSY when the queue stays stopped.
 */
static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
	netif_stop_queue(priv->ndev);

	/* Might race with mtk_star_tx_poll, check again */
	smp_mb();
	if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
		return -EBUSY;

	netif_start_queue(priv->ndev);

	return 0;
}

/* Stop the TX queue unless at least @size descriptors are available. */
static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
	if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
		return 0;

	return __mtk_star_maybe_stop_tx(priv, size);
}

/* ndo_start_xmit: map the skb for DMA, push it onto the TX ring, account it
 * for BQL and kick the DMA engine.  The skb is dropped (not requeued) on a
 * DMA mapping failure.
 */
static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	int nfrags = skb_shinfo(skb)->nr_frags;

	if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			/* This is a hard error, log it. */
			pr_err_ratelimited("Tx ring full when queue awake\n");
		}
		return NETDEV_TX_BUSY;
	}

	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
	if (dma_mapping_error(dev, desc_data.dma_addr))
		goto err_drop_packet;

	desc_data.skb = skb;
	desc_data.len = skb->len;
	mtk_star_ring_push_head_tx(ring, &desc_data);

	netdev_sent_queue(ndev, skb->len);

	mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);

	mtk_star_dma_resume_tx(priv);

	return NETDEV_TX_OK;

err_drop_packet:
	dev_kfree_skb(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Returns the number of bytes sent or a negative number on the first
 * descriptor owned by DMA.
 */
static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct mtk_star_ring_desc_data desc_data;
	int ret;

	ret = mtk_star_ring_pop_tail(ring, &desc_data);
	if (ret)
		return ret;

	mtk_star_dma_unmap_tx(priv, &desc_data);
	ret = desc_data.skb->len;
	dev_kfree_skb_irq(desc_data.skb);

	return ret;
}

/* TX NAPI poll: reclaim completed descriptors, update the BQL completed
 * queue, wake the netdev queue when ring space recovered, and re-enable the
 * TX DMA interrupt once NAPI completes.
 */
static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
						  tx_napi);
	int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct net_device *ndev = priv->ndev;
	unsigned int head = ring->head;
	unsigned int entry = ring->tail;

	while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
		ret = mtk_star_tx_complete_one(priv);
		if (ret < 0)
			break;

		count++;
		pkts_compl++;
		bytes_compl += ret;
		entry = ring->tail;
	}

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) &&
	    (mtk_star_tx_ring_avail(ring)
> MTK_STAR_TX_THRESH))
		netif_wake_queue(ndev);

	if (napi_complete(napi)) {
		spin_lock(&priv->lock);
		/* Re-arm only the TX DMA interrupt. */
		mtk_star_enable_dma_irq(priv, false, true);
		spin_unlock(&priv->lock);
	}

	return 0;
}

/* ndo_get_stats64: refresh the cached counters and copy them out. */
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
					struct rtnl_link_stats64 *stats)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	mtk_star_update_stats(priv);

	memcpy(stats, &priv->stats, sizeof(*stats));
}

/* ndo_set_rx_mode: enable promiscuous mode, or program the multicast hash
 * filter.  For allmulti (or more groups than the table limit) every hash
 * bit is set; otherwise each address hashes to (addr[0] bit0 << 8) +
 * addr[5].
 */
static void mtk_star_set_rx_mode(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_addr;
	unsigned int hash_addr, i;
	int ret;

	if (ndev->flags & IFF_PROMISC) {
		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
		   ndev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash table. */
		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
			ret = mtk_star_set_hashbit(priv, i);
			if (ret)
				goto hash_fail;
		}
	} else {
		/* Clear previous settings. */
		ret = mtk_star_reset_hash_table(priv);
		if (ret)
			goto hash_fail;

		netdev_for_each_mc_addr(hw_addr, ndev) {
			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
			hash_addr += hw_addr->addr[5];
			ret = mtk_star_set_hashbit(priv, hash_addr);
			if (ret)
				goto hash_fail;
		}
	}

	return;

hash_fail:
	if (ret == -ETIMEDOUT)
		netdev_err(ndev, "setting hash bit timed out\n");
	else
		/* Should be -EIO */
		netdev_err(ndev, "unable to set hash bit");
}

static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_eth_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/* ethtool get_drvinfo: report the driver name only. */
static void mtk_star_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}

/* TODO Add ethtool stats.
 */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/* RX processing loop: pop up to @budget filled descriptors, pass good
 * frames to the stack and refill the ring, reusing the old skb whenever
 * allocating or mapping a replacement fails.  Returns the number of
 * descriptors processed, or -1 when the ring has nothing to pop.
 */
static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret, count = 0;

	while (count < budget) {
		ret = mtk_star_ring_pop_tail(ring, &desc_data);
		if (ret)
			return -1;

		curr_skb = desc_data.skb;

		if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
		    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
			/* Error packet -> drop and reuse skb. */
			new_skb = curr_skb;
			goto push_new_skb;
		}

		/* Prepare new skb before receiving the current one.
		 * Reuse the current skb if we fail at any point.
		 */
		new_skb = mtk_star_alloc_skb(ndev);
		if (!new_skb) {
			ndev->stats.rx_dropped++;
			new_skb = curr_skb;
			goto push_new_skb;
		}

		new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
		if (dma_mapping_error(dev, new_dma_addr)) {
			ndev->stats.rx_dropped++;
			dev_kfree_skb(new_skb);
			new_skb = curr_skb;
			netdev_err(ndev, "DMA mapping error of RX descriptor\n");
			goto push_new_skb;
		}

		/* We can't fail anymore at this point:
		 * it's safe to unmap the skb.
		 */
		mtk_star_dma_unmap_rx(priv, &desc_data);

		skb_put(desc_data.skb, desc_data.len);
		desc_data.skb->ip_summed = CHECKSUM_NONE;
		desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
		desc_data.skb->dev = ndev;
		netif_receive_skb(desc_data.skb);

		/* update dma_addr for new skb */
		desc_data.dma_addr = new_dma_addr;

push_new_skb:

		count++;

		desc_data.len = skb_tailroom(new_skb);
		desc_data.skb = new_skb;
		mtk_star_ring_push_head_rx(ring, &desc_data);
	}

	mtk_star_dma_resume_rx(priv);

	return count;
}

/* RX NAPI poll: process up to @budget frames; when under budget, complete
 * NAPI and re-enable the RX DMA interrupt under priv->lock.
 */
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv;
	int work_done = 0;

	priv = container_of(napi, struct mtk_star_priv, rx_napi);

	work_done = mtk_star_rx(priv, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock(&priv->lock);
		mtk_star_enable_dma_irq(priv, true, false);
		spin_unlock(&priv->lock);
	}

	return work_done;
}

/* Clear the RWOK (MDIO transfer done) flag by writing its bit. */
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
}

/* Poll for MDIO completion: RWOK set, 10us period, 300us timeout. */
static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
					10, MTK_STAR_WAIT_TIMEOUT);
}

/* MDIO bus read: clause-22 only; issue the read command and poll for
 * completion, then extract the data field from PHY_CTRL0.
 */
static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
{
	struct mtk_star_priv *priv = mii->priv;
	unsigned int val, data;
	int ret;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	mtk_star_mdio_rwok_clear(priv);

	val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
	val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
	val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;

regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);

	ret = mtk_star_mdio_rwok_wait(priv);
	if (ret)
		return ret;

	regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);

	/* Extract the 16-bit read data from the RWDATA field. */
	data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
	data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;

	return data;
}

/* MDIO bus write: clause-22 only; place data and register number into
 * PHY_CTRL0, issue the write command and wait for completion.
 */
static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
			       int regnum, u16 data)
{
	struct mtk_star_priv *priv = mii->priv;
	unsigned int val;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	mtk_star_mdio_rwok_clear(priv);

	val = data;
	val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
	val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
	regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
	regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
	val |= regnum;
	val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);

	return mtk_star_mdio_rwok_wait(priv);
}

/* Register the MDIO bus described by the "mdio" child node, if present and
 * enabled.  Returns -ENODEV when the node is missing or disabled.
 */
static int mtk_star_mdio_init(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct device *dev = mtk_star_get_dev(priv);
	struct device_node *of_node, *mdio_node;
	int ret;

	of_node = dev->of_node;

	mdio_node = of_get_child_by_name(of_node, "mdio");
	if (!mdio_node)
		return -ENODEV;

	if (!of_device_is_available(mdio_node)) {
		ret = -ENODEV;
		goto out_put_node;
	}

	priv->mii = devm_mdiobus_alloc(dev);
	if (!priv->mii) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	priv->mii->name = "mtk-mac-mdio";
	priv->mii->parent = dev;
	priv->mii->read = mtk_star_mdio_read;
	priv->mii->write = mtk_star_mdio_write;
	priv->mii->priv = priv;

	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);

out_put_node:
	of_node_put(mdio_node);
	return ret;
}

static __maybe_unused int mtk_star_suspend(struct device *dev) 1465 { 1466 struct mtk_star_priv *priv; 1467 struct net_device *ndev; 1468 1469 ndev = dev_get_drvdata(dev); 1470 priv = netdev_priv(ndev); 1471 1472 if (netif_running(ndev)) 1473 mtk_star_disable(ndev); 1474 1475 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); 1476 1477 return 0; 1478 } 1479 1480 static __maybe_unused int mtk_star_resume(struct device *dev) 1481 { 1482 struct mtk_star_priv *priv; 1483 struct net_device *ndev; 1484 int ret; 1485 1486 ndev = dev_get_drvdata(dev); 1487 priv = netdev_priv(ndev); 1488 1489 ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks); 1490 if (ret) 1491 return ret; 1492 1493 if (netif_running(ndev)) { 1494 ret = mtk_star_enable(ndev); 1495 if (ret) 1496 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); 1497 } 1498 1499 return ret; 1500 } 1501 1502 static void mtk_star_clk_disable_unprepare(void *data) 1503 { 1504 struct mtk_star_priv *priv = data; 1505 1506 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); 1507 } 1508 1509 static int mtk_star_set_timing(struct mtk_star_priv *priv) 1510 { 1511 struct device *dev = mtk_star_get_dev(priv); 1512 unsigned int delay_val = 0; 1513 1514 switch (priv->phy_intf) { 1515 case PHY_INTERFACE_MODE_MII: 1516 case PHY_INTERFACE_MODE_RMII: 1517 delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv); 1518 delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv); 1519 break; 1520 default: 1521 dev_err(dev, "This interface not supported\n"); 1522 return -EINVAL; 1523 } 1524 1525 return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val); 1526 } 1527 1528 static int mtk_star_probe(struct platform_device *pdev) 1529 { 1530 struct device_node *of_node; 1531 struct mtk_star_priv *priv; 1532 struct net_device *ndev; 1533 struct device *dev; 1534 void __iomem *base; 1535 int ret, i; 1536 1537 dev = &pdev->dev; 1538 of_node = dev->of_node; 1539 1540 ndev = devm_alloc_etherdev(dev, sizeof(*priv)); 1541 
if (!ndev) 1542 return -ENOMEM; 1543 1544 priv = netdev_priv(ndev); 1545 priv->ndev = ndev; 1546 priv->compat_data = of_device_get_match_data(&pdev->dev); 1547 SET_NETDEV_DEV(ndev, dev); 1548 platform_set_drvdata(pdev, ndev); 1549 1550 ndev->min_mtu = ETH_ZLEN; 1551 ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE; 1552 1553 spin_lock_init(&priv->lock); 1554 1555 base = devm_platform_ioremap_resource(pdev, 0); 1556 if (IS_ERR(base)) 1557 return PTR_ERR(base); 1558 1559 /* We won't be checking the return values of regmap read & write 1560 * functions. They can only fail for mmio if there's a clock attached 1561 * to regmap which is not the case here. 1562 */ 1563 priv->regs = devm_regmap_init_mmio(dev, base, 1564 &mtk_star_regmap_config); 1565 if (IS_ERR(priv->regs)) 1566 return PTR_ERR(priv->regs); 1567 1568 priv->pericfg = syscon_regmap_lookup_by_phandle(of_node, 1569 "mediatek,pericfg"); 1570 if (IS_ERR(priv->pericfg)) { 1571 dev_err(dev, "Failed to lookup the PERICFG syscon\n"); 1572 return PTR_ERR(priv->pericfg); 1573 } 1574 1575 ndev->irq = platform_get_irq(pdev, 0); 1576 if (ndev->irq < 0) 1577 return ndev->irq; 1578 1579 for (i = 0; i < MTK_STAR_NCLKS; i++) 1580 priv->clks[i].id = mtk_star_clk_names[i]; 1581 ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks); 1582 if (ret) 1583 return ret; 1584 1585 ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks); 1586 if (ret) 1587 return ret; 1588 1589 ret = devm_add_action_or_reset(dev, 1590 mtk_star_clk_disable_unprepare, priv); 1591 if (ret) 1592 return ret; 1593 1594 ret = of_get_phy_mode(of_node, &priv->phy_intf); 1595 if (ret) { 1596 return ret; 1597 } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII && 1598 priv->phy_intf != PHY_INTERFACE_MODE_MII) { 1599 dev_err(dev, "unsupported phy mode: %s\n", 1600 phy_modes(priv->phy_intf)); 1601 return -EINVAL; 1602 } 1603 1604 priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0); 1605 if (!priv->phy_node) { 1606 dev_err(dev, "failed to retrieve the phy handle 
from device tree\n"); 1607 return -ENODEV; 1608 } 1609 1610 priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc"); 1611 priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse"); 1612 priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse"); 1613 1614 if (priv->compat_data->set_interface_mode) { 1615 ret = priv->compat_data->set_interface_mode(ndev); 1616 if (ret) { 1617 dev_err(dev, "Failed to set phy interface, err = %d\n", ret); 1618 return -EINVAL; 1619 } 1620 } 1621 1622 ret = mtk_star_set_timing(priv); 1623 if (ret) { 1624 dev_err(dev, "Failed to set timing, err = %d\n", ret); 1625 return -EINVAL; 1626 } 1627 1628 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 1629 if (ret) { 1630 dev_err(dev, "unsupported DMA mask\n"); 1631 return ret; 1632 } 1633 1634 priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE, 1635 &priv->dma_addr, 1636 GFP_KERNEL | GFP_DMA); 1637 if (!priv->ring_base) 1638 return -ENOMEM; 1639 1640 mtk_star_nic_disable_pd(priv); 1641 mtk_star_init_config(priv); 1642 1643 ret = mtk_star_mdio_init(ndev); 1644 if (ret) 1645 return ret; 1646 1647 ret = platform_get_ethdev_address(dev, ndev); 1648 if (ret || !is_valid_ether_addr(ndev->dev_addr)) 1649 eth_hw_addr_random(ndev); 1650 1651 ndev->netdev_ops = &mtk_star_netdev_ops; 1652 ndev->ethtool_ops = &mtk_star_ethtool_ops; 1653 1654 netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll); 1655 netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll); 1656 1657 return devm_register_netdev(dev, ndev); 1658 } 1659 1660 #ifdef CONFIG_OF 1661 static int mt8516_set_interface_mode(struct net_device *ndev) 1662 { 1663 struct mtk_star_priv *priv = netdev_priv(ndev); 1664 struct device *dev = mtk_star_get_dev(priv); 1665 unsigned int intf_val, ret, rmii_rxc; 1666 1667 switch (priv->phy_intf) { 1668 case PHY_INTERFACE_MODE_MII: 1669 intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII; 1670 rmii_rxc = 0; 1671 break; 1672 case PHY_INTERFACE_MODE_RMII: 1673 intf_val 
= MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
		rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
		break;
	default:
		dev_err(dev, "This interface not supported\n");
		return -EINVAL;
	}

	/* Select the RMII reference clock source first... */
	ret = regmap_update_bits(priv->pericfg,
				 MTK_PERICFG_REG_NIC_CFG1_CON,
				 MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
				 rmii_rxc);
	if (ret)
		return ret;

	/* ...then the interface mode itself. */
	return regmap_update_bits(priv->pericfg,
				  MTK_PERICFG_REG_NIC_CFG0_CON,
				  MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
				  intf_val);
}

/* MT8365: same MII/RMII and clock selection, but through the single v2
 * PERICFG register.
 */
static int mt8365_set_interface_mode(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct device *dev = mtk_star_get_dev(priv);
	unsigned int intf_val;

	switch (priv->phy_intf) {
	case PHY_INTERFACE_MODE_MII:
		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
		intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
		break;
	default:
		dev_err(dev, "This interface not supported\n");
		return -EINVAL;
	}

	return regmap_update_bits(priv->pericfg,
				  MTK_PERICFG_REG_NIC_CFG_CON_V2,
				  MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
				  MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
				  intf_val);
}

static const struct mtk_star_compat mtk_star_mt8516_compat = {
	.set_interface_mode = mt8516_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
};

static const struct mtk_star_compat mtk_star_mt8365_compat = {
	.set_interface_mode = mt8365_set_interface_mode,
	.bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
};

static const struct of_device_id mtk_star_of_match[] = {
	{ .compatible = "mediatek,mt8516-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8518-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8175-eth",
	  .data = &mtk_star_mt8516_compat },
	{ .compatible = "mediatek,mt8365-eth",
	  .data = &mtk_star_mt8365_compat },
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
#endif

static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);

static struct platform_driver mtk_star_driver = {
	.driver = {
		.name = MTK_STAR_DRVNAME,
		.pm = &mtk_star_pm_ops,
		.of_match_table = of_match_ptr(mtk_star_of_match),
	},
	.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");