// SPDX-License-Identifier: GPL-2.0
/* Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 * List of authors who contributed to this driver before mainlining:
 * Alexander Couzens <lynxis@fe80.eu>
 * Christian Lamparter <chunkeey@gmail.com>
 * Chuanhong Guo <gch981213@gmail.com>
 * Daniel F. Dickinson <cshored@thecshore.com>
 * David Bauer <mail@david-bauer.net>
 * Felix Fietkau <nbd@nbd.name>
 * Gabor Juhos <juhosg@freemail.hu>
 * Hauke Mehrtens <hauke@hauke-m.de>
 * Johann Neuhauser <johann@it-neuhauser.de>
 * John Crispin <john@phrozen.org>
 * Jo-Philipp Wich <jo@mein.io>
 * Koen Vandeputte <koen.vandeputte@ncentric.com>
 * Lucian Cristian <lucian.cristian@gmail.com>
 * Matt Merhar <mattmerhar@protonmail.com>
 * Milan Krstic <milan.krstic@gmail.com>
 * Petr Štetiar <ynezz@true.cz>
 * Rosen Penev <rosenp@gmail.com>
 * Stephen Walker <stephendwalker+github@gmail.com>
 * Vittorio Gambaletta <openwrt@vittgam.net>
 * Weijie Gao <hackpascal@gmail.com>
 * Imre Kaloz <kaloz@openwrt.org>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>

/* For our NAPI weight, bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT	32
#define AG71XX_OOM_REFILL	(1 + HZ / 10)

#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN	1540

#define AG71XX_TX_RING_SPLIT		512
#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
						     AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT	128
#define AG71XX_RX_RING_SIZE_DEFAULT	256

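/* Note: with the 1540-byte TX MTU above and a 512-byte split threshold,
 * AG71XX_TX_RING_DS_PER_PKT works out to DIV_ROUND_UP(1540, 512) = 4, so
 * on SoCs that need TX descriptor splitting (AR7100, see probe below) the
 * default TX ring is allocated 128 * 4 descriptors deep to leave room for
 * whole packets.
 */
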
#define AG71XX_MDIO_RETRY	1000
#define AG71XX_MDIO_DELAY	5
#define AG71XX_MDIO_MAX_CLK	5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1	0x0000
#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2	0x0004
#define MAC_CFG2_FDX		BIT(0)
#define MAC_CFG2_PAD_CRC_EN	BIT(2)
#define MAC_CFG2_LEN_CHECK	BIT(4)
#define MAC_CFG2_IF_1000	BIT(9)
#define MAC_CFG2_IF_10_100	BIT(8)

#define AG71XX_REG_MAC_MFL	0x0010

#define AG71XX_REG_MII_CFG	0x0020
#define MII_CFG_CLK_DIV_4	0
#define MII_CFG_CLK_DIV_6	2
#define MII_CFG_CLK_DIV_8	3
#define MII_CFG_CLK_DIV_10	4
#define MII_CFG_CLK_DIV_14	5
#define MII_CFG_CLK_DIV_20	6
#define MII_CFG_CLK_DIV_28	7
#define MII_CFG_CLK_DIV_34	8
#define MII_CFG_CLK_DIV_42	9
#define MII_CFG_CLK_DIV_50	10
#define MII_CFG_CLK_DIV_58	11
#define MII_CFG_CLK_DIV_66	12
#define MII_CFG_CLK_DIV_74	13
#define MII_CFG_CLK_DIV_82	14
#define MII_CFG_CLK_DIV_98	15
#define MII_CFG_RESET		BIT(31)

#define AG71XX_REG_MII_CMD	0x0024
#define MII_CMD_READ		BIT(0)

#define AG71XX_REG_MII_ADDR	0x0028
#define MII_ADDR_SHIFT		8

#define AG71XX_REG_MII_CTRL	0x002c
#define AG71XX_REG_MII_STATUS	0x0030
#define AG71XX_REG_MII_IND	0x0034
#define MII_IND_BUSY		BIT(0)
#define MII_IND_INVALID		BIT(2)

#define AG71XX_REG_MAC_IFCTL	0x0038
#define MAC_IFCTL_SPEED		BIT(16)

#define AG71XX_REG_MAC_ADDR1	0x0040
#define AG71XX_REG_MAC_ADDR2	0x0044
#define AG71XX_REG_FIFO_CFG0	0x0048
#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
			 | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT	8

#define AG71XX_REG_FIFO_CFG1	0x004c
#define AG71XX_REG_FIFO_CFG2	0x0050
#define AG71XX_REG_FIFO_CFG3	0x0054
#define AG71XX_REG_FIFO_CFG4	0x0058
#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define AG71XX_REG_FIFO_CFG5	0x005c
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */

#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */

#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1

#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)

#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4

#define ETH_SWITCH_HEADER_LEN	2

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

#define DESC_EMPTY		BIT(31)
#define DESC_MORE		BIT(24)
#define DESC_PKTLEN_M		0xfff
struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)

struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;
			unsigned int len;
		} tx;
		struct {
			dma_addr_t dma_addr;
			void *rx_buf;
		} rx;
	};
};

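/* Ring bookkeeping: 'curr' and 'dirty' are free-running counters. Rings
 * are always a power of two deep, so a slot index is derived by masking
 * with BIT(order) - 1 and the fill level by 'curr - dirty'; the counters
 * themselves are never wrapped back to zero.
 */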
struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;
	unsigned int dirty;

	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;
	u16 desc_split;
	dma_addr_t descs_dma;
	u8 *descs_cpu;
};

enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};

struct ag71xx_dcfg {
	u32 max_frame_len;
	const u32 *fifodata;
	u16 desc_pktlen_mask;
	bool tx_hang_workaround;
	enum ag71xx_type type;
};

struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;

	u16 rx_buf_size;
	u8 rx_buf_offset;

	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;
	const struct ag71xx_dcfg *dcfg;

	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;

	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;

	int phy_if_mode;

	struct delayed_work restart_work;
	struct timer_list oom_timer;

	struct reset_control *mac_reset;

	u32 fifodata[3];
	int mac_idx;

	struct reset_control *mdio_reset;
	struct mii_bus *mii_bus;
	struct clk *clk_mdio;
	struct clk *clk_eth;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}

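/* MDC clock selection: the tables below map the MII_CFG divider field to
 * the actual reference-clock divisor for each SoC family, and
 * ag71xx_mdio_get_divider() picks the first (smallest) divisor that keeps
 * MDC at or below AG71XX_MDIO_MAX_CLK. For example, assuming a 100 MHz
 * MDIO reference clock on an AR71xx-class SoC, 100 MHz / 20 = 5 MHz is
 * the first result within the limit, so index 6 (MII_CFG_CLK_DIV_20)
 * would be programmed.
 */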
static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};

static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	struct mii_bus *mii_bus;
	struct device_node *np;
	int err;

	np = dev->of_node;
	ag->mii_bus = NULL;

	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get mdio reset.\n");
		err = PTR_ERR(ag->mdio_reset);
		goto mdio_err_put_clk;
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	if (!IS_ERR(ag->mdio_reset)) {
		reset_control_assert(ag->mdio_reset);
		msleep(100);
		reset_control_deassert(ag->mdio_reset);
		msleep(200);
	}

	err = of_mdiobus_register(mii_bus, np);
	if (err)
		goto mdio_err_put_clk;

	ag->mii_bus = mii_bus;

	return 0;

mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);
	clk_disable_unprepare(ag->clk_mdio);
}

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

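/* TX hang workaround: on SoCs with dcfg->tx_hang_workaround set, the DMA
 * engine can apparently wedge with descriptors still owned by hardware.
 * The check below peeks at the undocumented RX/TX state machine registers;
 * the specific magic values are inherited from the OpenWrt driver this
 * code was mainlined from and are assumed to identify the stuck states.
 */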
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

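/* While the engines are stopped, both descriptor base registers are
 * pointed at 'stop_desc', a single dummy descriptor whose 'next' field
 * links back to itself. Should the hardware still fetch a descriptor
 * during the reset window, it spins harmlessly on that dummy entry
 * instead of walking a ring that is about to be torn down.
 */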
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

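/* For the common case of a 1500-byte MTU the helper above gives
 * 2 + 14 + 4 + 1500 + 4 = 1524 bytes, i.e. the MAC is always sized for a
 * possible 2-byte switch header in front of the frame plus a VLAN tag in
 * addition to the Ethernet header and FCS.
 */
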
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

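/* On everything except AR7100/AR9130 a link change is handled with a full
 * MAC block reset (see ag71xx_link_adjust() below). The MII configuration
 * and the current RX descriptor pointer are sampled first and written back
 * afterwards, so only the TX side is rewound; presumably this is what the
 * hardware needs to cleanly pick up new speed/duplex settings.
 */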
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}

static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct phy_device *phydev = ag->ndev->phydev;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!phydev->link && update) {
		ag71xx_hw_stop(ag);
		return;
	}

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (phydev->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		WARN(1, "unsupported speed %i\n", phydev->speed);
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	ag71xx_hw_start(ag);

	if (update)
		phy_print_status(phydev);
}

static void ag71xx_phy_link_adjust(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ag71xx_link_adjust(ag, true);
}

static int ag71xx_phy_connect(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	struct net_device *ndev = ag->ndev;
	struct device_node *phy_node;
	struct phy_device *phydev;
	int ret;

	if (of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
				  ret);
			return ret;
		}

		phy_node = of_node_get(np);
	} else {
		phy_node = of_parse_phandle(np, "phy-handle", 0);
	}

	if (!phy_node) {
		netif_err(ag, probe, ndev, "Could not find valid phy node\n");
		return -ENODEV;
	}

	phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
				0, ag->phy_if_mode);

	of_node_put(phy_node);

	if (!phydev) {
		netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
		return -ENODEV;
	}

	phy_attached_info(phydev);

	return 0;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

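/* RX buffers are page-fragment allocations later handed to build_skb() on
 * receive, so each one is sized for rx_buf_size bytes of packet data plus
 * the struct skb_shared_info that build_skb() places at the end of the
 * buffer. Only the data portion is DMA-mapped.
 */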
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;

	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);

	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_ATOMIC);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[BIT(tx->order)];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ret = ag71xx_phy_connect(ag);
	if (ret)
		goto err;

	phy_start(ndev->phydev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);
	ag71xx_hw_disable(ag);

	return 0;
}

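/* The hardware fetches packet data in runs described by one descriptor
 * each. On AR7100 a single TX packet is split across several descriptors
 * of at most AG71XX_TX_RING_SPLIT (512) bytes, chained via DESC_MORE.
 * A transfer of four bytes or less hangs the TX DMA engine, so the split
 * points are nudged to keep every trailing segment longer than that.
 */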
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;

	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;

	netdev_sent_queue(ndev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}

	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	ndev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	if (!ndev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	netif_err(ag, tx_err, ndev, "tx timeout\n");

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);
	struct net_device *ndev = ag->ndev;

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ndev->phydev->link)
		ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

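/* RX fast path: each received frame's page fragment is detached from the
 * ring, wrapped with build_skb() and batched onto a local list that is
 * flushed with netif_receive_skb_list() once per poll; the ring slots are
 * then refilled in bulk by ag71xx_ring_rx_refill().
 */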
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct sk_buff *next, *skb;
	struct ag71xx_ring *ring;
	struct list_head rx_list;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			ndev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = ndev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}

static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
			  rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;

oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

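/* Interrupt handling follows the usual NAPI pattern: RX/TX interrupts are
 * masked in the handler and NAPI is scheduled; ag71xx_poll() re-enables
 * them only once a poll round finishes under budget with no further
 * packet-received/packet-sent status pending. Bus errors are acked and
 * logged directly from the hard IRQ.
 */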
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag;
	u32 status;

	ag = netdev_priv(ndev);
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			netif_err(ag, intr, ndev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			netif_err(ag, intr, ndev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		netif_dbg(ag, intr, ndev, "enable polling mode\n");
		napi_schedule(&ag->napi);
	}

	return IRQ_HANDLED;
}

static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ndev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};

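/* The MAC blocks sit at fixed bus addresses on the supported SoCs
 * (0x19000000 for the first MAC, 0x1a000000 for the second), so probe()
 * derives mac_idx by matching the 'reg' base against the table above.
 * The index is only used to give each MDIO bus a distinct id.
 */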
static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ag71xx_dcfg *dcfg;
	struct net_device *ndev;
	struct resource *res;
	const void *mac_addr;
	int tx_size, err, i;
	struct ag71xx *ag;

	if (!np)
		return -ENODEV;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!ndev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	dcfg = of_device_get_match_data(&pdev->dev);
	if (!dcfg)
		return -EINVAL;

	ag = netdev_priv(ndev);
	ag->mac_idx = -1;
	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
		if (ar71xx_addr_ar7100[i] == res->start)
			ag->mac_idx = i;
	}

	if (ag->mac_idx < 0) {
		netif_err(ag, probe, ndev, "unknown mac idx\n");
		return -EINVAL;
	}

	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
	if (IS_ERR(ag->clk_eth)) {
		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
		return PTR_ERR(ag->clk_eth);
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ag->pdev = pdev;
	ag->ndev = ndev;
	ag->dcfg = dcfg;
	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		netif_err(ag, probe, ndev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
					    res->end - res->start + 1);
	if (!ag->mac_base)
		return -ENOMEM;

	ndev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
			  ndev->irq);
		return err;
	}

	ndev->netdev_ops = &ag71xx_netdev_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ndev->min_mtu = 68;
	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (ag71xx_is(ag, AR7100)) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32)ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr))
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
		eth_random_addr(ndev->dev_addr);
	}

	ag->phy_if_mode = of_get_phy_mode(np);
	if (ag->phy_if_mode < 0) {
		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
		return ag->phy_if_mode;
	}

	netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	err = clk_prepare_enable(ag->clk_eth);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
		return err;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	err = ag71xx_mdio_probe(ag);
	if (err)
		goto err_put_clk;

	platform_set_drvdata(pdev, ndev);

	err = register_netdev(ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		goto err_mdio_remove;
	}

	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		   (unsigned long)ag->mac_base, ndev->irq,
		   phy_modes(ag->phy_if_mode));

	return 0;

err_mdio_remove:
	ag71xx_mdio_remove(ag);
err_put_clk:
	clk_disable_unprepare(ag->clk_eth);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!ndev)
		return 0;

	ag = netdev_priv(ndev);
	unregister_netdev(ndev);
	ag71xx_mdio_remove(ag);
	clk_disable_unprepare(ag->clk_eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

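/* A device tree node bound by the table below might look roughly like
 * this (illustrative sketch only; interrupt numbers, reset/clock
 * specifiers and the PHY wiring depend on the board's dtsi):
 *
 *	eth0: ethernet@19000000 {
 *		compatible = "qca,ar9330-eth";
 *		reg = <0x19000000 0x200>;
 *		interrupts = <4>;
 *		resets = <&rst 9>, <&rst 22>;
 *		reset-names = "mac", "mdio";
 *		clocks = <&pll ATH79_CLK_AHB>, <&pll ATH79_CLK_MDIO>;
 *		clock-names = "eth", "mdio";
 *		phy-mode = "mii";
 *		phy-handle = <&phy0>;
 *	};
 *
 * The "eth"/"mdio" clock names and "mac"/"mdio" reset names match what
 * probe() and ag71xx_mdio_probe() request above.
 */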
static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= "ag71xx",
		.of_match_table = ag71xx_match,
	}
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");