// SPDX-License-Identifier: GPL-2.0
/* Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 * List of authors who contributed to this driver before mainlining:
 * Alexander Couzens <lynxis@fe80.eu>
 * Christian Lamparter <chunkeey@gmail.com>
 * Chuanhong Guo <gch981213@gmail.com>
 * Daniel F. Dickinson <cshored@thecshore.com>
 * David Bauer <mail@david-bauer.net>
 * Felix Fietkau <nbd@nbd.name>
 * Gabor Juhos <juhosg@freemail.hu>
 * Hauke Mehrtens <hauke@hauke-m.de>
 * Johann Neuhauser <johann@it-neuhauser.de>
 * John Crispin <john@phrozen.org>
 * Jo-Philipp Wich <jo@mein.io>
 * Koen Vandeputte <koen.vandeputte@ncentric.com>
 * Lucian Cristian <lucian.cristian@gmail.com>
 * Matt Merhar <mattmerhar@protonmail.com>
 * Milan Krstic <milan.krstic@gmail.com>
 * Petr Štetiar <ynezz@true.cz>
 * Rosen Penev <rosenp@gmail.com>
 * Stephen Walker <stephendwalker+github@gmail.com>
 * Vittorio Gambaletta <openwrt@vittgam.net>
 * Weijie Gao <hackpascal@gmail.com>
 * Imre Kaloz <kaloz@openwrt.org>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>

/* For our NAPI weight, bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT	32
#define AG71XX_OOM_REFILL	(1 + HZ / 10)

#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN	1540

#define AG71XX_TX_RING_SPLIT		512
#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
						     AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT	128
#define AG71XX_RX_RING_SIZE_DEFAULT	256

#define AG71XX_MDIO_RETRY	1000
#define AG71XX_MDIO_DELAY	5
#define AG71XX_MDIO_MAX_CLK	5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1	0x0000
#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2	0x0004
#define MAC_CFG2_FDX		BIT(0)
#define MAC_CFG2_PAD_CRC_EN	BIT(2)
#define MAC_CFG2_LEN_CHECK	BIT(4)
#define MAC_CFG2_IF_1000	BIT(9)
#define MAC_CFG2_IF_10_100	BIT(8)

#define AG71XX_REG_MAC_MFL	0x0010

#define AG71XX_REG_MII_CFG	0x0020
#define MII_CFG_CLK_DIV_4	0
#define MII_CFG_CLK_DIV_6	2
#define MII_CFG_CLK_DIV_8	3
#define MII_CFG_CLK_DIV_10	4
#define MII_CFG_CLK_DIV_14	5
#define MII_CFG_CLK_DIV_20	6
#define MII_CFG_CLK_DIV_28	7
#define MII_CFG_CLK_DIV_34	8
#define MII_CFG_CLK_DIV_42	9
#define MII_CFG_CLK_DIV_50	10
#define MII_CFG_CLK_DIV_58	11
#define MII_CFG_CLK_DIV_66	12
#define MII_CFG_CLK_DIV_74	13
#define MII_CFG_CLK_DIV_82	14
#define MII_CFG_CLK_DIV_98	15
#define MII_CFG_RESET		BIT(31)

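/* MDIO management registers: MII_ADDR takes (phy << MII_ADDR_SHIFT) | reg,
 * MII_CMD bit 0 starts a read, MII_CTRL carries write data, MII_STATUS
 * returns read data and MII_IND signals a transaction in flight.
 */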
#define AG71XX_REG_MII_CMD	0x0024
#define MII_CMD_READ		BIT(0)

#define AG71XX_REG_MII_ADDR	0x0028
#define MII_ADDR_SHIFT		8

#define AG71XX_REG_MII_CTRL	0x002c
#define AG71XX_REG_MII_STATUS	0x0030
#define AG71XX_REG_MII_IND	0x0034
#define MII_IND_BUSY		BIT(0)
#define MII_IND_INVALID		BIT(2)

#define AG71XX_REG_MAC_IFCTL	0x0038
#define MAC_IFCTL_SPEED		BIT(16)

#define AG71XX_REG_MAC_ADDR1	0x0040
#define AG71XX_REG_MAC_ADDR2	0x0044
#define AG71XX_REG_FIFO_CFG0	0x0048
#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
			 | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT	8

#define AG71XX_REG_FIFO_CFG1	0x004c
#define AG71XX_REG_FIFO_CFG2	0x0050
#define AG71XX_REG_FIFO_CFG3	0x0054
#define AG71XX_REG_FIFO_CFG4	0x0058
#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

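/* FIFO_CFG5 carries per-frame-type flags similar to FIFO_CFG4 but with a
 * slightly different bit layout and two extra bits: FIFO_CFG5_SF (short
 * frame) and FIFO_CFG5_BM (byte mode), the latter left clear at init and
 * set only for gigabit links in ag71xx_link_adjust().
 */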
#define AG71XX_REG_FIFO_CFG5	0x005c
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */

#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */

#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1

#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)

#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4

#define ETH_SWITCH_HEADER_LEN	2

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)

#define DESC_EMPTY	BIT(31)
#define DESC_MORE	BIT(24)
#define DESC_PKTLEN_M	0xfff
struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)

struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;
			unsigned int len;
		} tx;
		struct {
			dma_addr_t dma_addr;
			void *rx_buf;
		} rx;
	};
};

struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;
	unsigned int dirty;

	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;
	u16 desc_split;
	dma_addr_t descs_dma;
	u8 *descs_cpu;
};

enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};

struct ag71xx_dcfg {
	u32 max_frame_len;
	const u32 *fifodata;
	u16 desc_pktlen_mask;
	bool tx_hang_workaround;
	enum ag71xx_type type;
};

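/* One ag71xx_dcfg exists per supported SoC family (see ag71xx_match at the
 * end of this file): fifodata supplies the FIFO_CFG1-3 values programmed in
 * ag71xx_hw_setup(), desc_pktlen_mask bounds the packet length field of a
 * DMA descriptor, and tx_hang_workaround enables the stuck-DMA check in
 * ag71xx_tx_packets().
 */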
struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;

	u16 rx_buf_size;
	u8 rx_buf_offset;

	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;
	const struct ag71xx_dcfg *dcfg;

	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;

	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;

	phy_interface_t phy_if_mode;

	struct delayed_work restart_work;
	struct timer_list oom_timer;

	struct reset_control *mac_reset;

	u32 fifodata[3];
	int mac_idx;

	struct reset_control *mdio_reset;
	struct mii_bus *mii_bus;
	struct clk *clk_mdio;
	struct clk *clk_eth;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}

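/* MDC divider tables, indexed by the MII_CFG_CLK_DIV_* register value:
 * entry i is the divider applied to the MDIO reference clock when i is
 * written to the clock-select field of MII_CFG.
 */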
static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};

static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;

	np = dev->of_node;
	ag->mii_bus = NULL;

	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get mdio reset.\n");
		err = PTR_ERR(ag->mdio_reset);
		goto mdio_err_put_clk;
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	reset_control_assert(ag->mdio_reset);
	msleep(100);
	reset_control_deassert(ag->mdio_reset);
	msleep(200);

	mnp = of_get_child_by_name(np, "mdio");
	err = of_mdiobus_register(mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		goto mdio_err_put_clk;

	ag->mii_bus = mii_bus;

	return 0;

mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);
	clk_disable_unprepare(ag->clk_mdio);
}

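/* Quiesce the MAC: used on link loss, before a fast reset and on teardown.
 * Interrupts are masked and both DMA engines are disabled here;
 * ag71xx_dma_reset() additionally waits for the engines to go idle.
 */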
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		dev_kfree_skb_any(skb);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

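/* A note on the TX completion accounting above: curr and dirty are
 * free-running counters, so curr - dirty is the number of descriptors in
 * flight. With the default order of 7 (128 descriptors) the queue is only
 * woken once fewer than 96 entries (3/4 of the ring) are in use.
 */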
static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity;
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

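/* Quick MAC reset used on link changes. MII_CFG and the current RX
 * descriptor pointer are saved and restored around the reset so MDIO access
 * and RX can resume where they left off; the TX ring is restarted from
 * index 0.
 */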
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}

static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
	struct phy_device *phydev = ag->ndev->phydev;
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;

	if (!phydev->link && update) {
		ag71xx_hw_stop(ag);
		return;
	}

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (phydev->speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		WARN(1, "unsupported speed %i\n", phydev->speed);
		return;
	}

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	ag71xx_hw_start(ag);

	if (update)
		phy_print_status(phydev);
}

static void ag71xx_phy_link_adjust(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ag71xx_link_adjust(ag, true);
}

static int ag71xx_phy_connect(struct ag71xx *ag)
{
	struct device_node *np = ag->pdev->dev.of_node;
	struct net_device *ndev = ag->ndev;
	struct device_node *phy_node;
	struct phy_device *phydev;
	int ret;

	if (of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
				  ret);
			return ret;
		}

		phy_node = of_node_get(np);
	} else {
		phy_node = of_parse_phandle(np, "phy-handle", 0);
	}

	if (!phy_node) {
		netif_err(ag, probe, ndev, "Could not find valid phy node\n");
		return -ENODEV;
	}

	phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
				0, ag->phy_if_mode);

	of_node_put(phy_node);

	if (!phydev) {
		netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
		return -ENODEV;
	}

	phy_attached_info(phydev);

	return 0;
}

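/* Ring management. Descriptors for both rings live in a single coherent DMA
 * block (see ag71xx_rings_init()); curr is the producer index, dirty the
 * consumer index, both free-running and masked with BIT(order) - 1 when
 * used to index the ring.
 */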
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

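/* RX buffers are page-fragment allocations of ag71xx_buffer_size() bytes:
 * rx_buf_size for the frame plus room for struct skb_shared_info, so a
 * filled buffer can be handed to build_skb() in ag71xx_rx_packets() without
 * copying. desc->data points rx_buf_offset bytes into the buffer.
 */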
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;

	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);

	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

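/* Bring-up/teardown helpers shared by ndo_open/ndo_stop and the restart
 * worker: ag71xx_hw_enable() programs the descriptor base registers with
 * the DMA addresses set up by ag71xx_rings_init(); ag71xx_hw_disable()
 * undoes the bring-up in reverse order.
 */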
static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	ret = ag71xx_phy_connect(ag);
	if (ret)
		goto err;

	phy_start(ndev->phydev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);
	ag71xx_hw_disable(ag);

	return 0;
}

static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

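/* Worked example for the split logic above: on AR7100, desc_split is 512,
 * so a maximum-size 1540-byte frame needs at most DIV_ROUND_UP(1540, 512) =
 * 4 descriptors (AG71XX_TX_RING_DS_PER_PKT). A segment is shortened by 4
 * bytes whenever the remainder would leave a follow-up DMA transfer of 4
 * bytes or less, since such transfers hang the hardware.
 */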
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;

	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;

	netdev_sent_queue(ndev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}

	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	ndev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(ndev);

	netif_err(ag, tx_err, ndev, "tx timeout\n");

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);
	struct net_device *ndev = ag->ndev;

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	if (ndev->phydev->link)
		ag71xx_link_adjust(ag, false);
	rtnl_unlock();
}

static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct sk_buff *next, *skb;
	struct ag71xx_ring *ring;
	struct list_head rx_list;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			ndev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = ndev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}

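/* NAPI poll: TX completions are reaped first, then up to 'limit' frames are
 * received. Received skbs are batched on a local list and handed to the
 * stack in one go via netif_receive_skb_list().
 */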
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
			  rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;

oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag;
	u32 status;

	ag = netdev_priv(ndev);
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			netif_err(ag, intr, ndev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			netif_err(ag, intr, ndev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		netif_dbg(ag, intr, ndev, "enable polling mode\n");
		napi_schedule(&ag->napi);
	}

	return IRQ_HANDLED;
}

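/* Only the max-frame-length register needs updating on an MTU change; RX
 * buffer sizing is derived from the MTU in ag71xx_open().
 */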
static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);

	ndev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};

static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ag71xx_dcfg *dcfg;
	struct net_device *ndev;
	struct resource *res;
	const void *mac_addr;
	int tx_size, err, i;
	struct ag71xx *ag;

	if (!np)
		return -ENODEV;

	/* ndev and its private area are devm-managed; no explicit
	 * free_netdev() is needed on the error paths below.
	 */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!ndev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	dcfg = of_device_get_match_data(&pdev->dev);
	if (!dcfg)
		return -EINVAL;

	ag = netdev_priv(ndev);
	ag->mac_idx = -1;
	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
		if (ar71xx_addr_ar7100[i] == res->start)
			ag->mac_idx = i;
	}

	if (ag->mac_idx < 0) {
		netif_err(ag, probe, ndev, "unknown mac idx\n");
		return -EINVAL;
	}

	ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
	if (IS_ERR(ag->clk_eth)) {
		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
		return PTR_ERR(ag->clk_eth);
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ag->pdev = pdev;
	ag->ndev = ndev;
	ag->dcfg = dcfg;
	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		netif_err(ag, probe, ndev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ag->mac_base)
		return -ENOMEM;

	ndev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
			  ndev->irq);
		return err;
	}

	ndev->netdev_ops = &ag71xx_netdev_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ndev->min_mtu = 68;
	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (ag71xx_is(ag, AR7100)) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32)ag->stop_desc_dma;

	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr))
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
		eth_random_addr(ndev->dev_addr);
	}

	err = of_get_phy_mode(np, &ag->phy_if_mode);
	if (err) {
		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
		return err;
	}

	netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	err = clk_prepare_enable(ag->clk_eth);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
		return err;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	err = ag71xx_mdio_probe(ag);
	if (err)
		goto err_put_clk;

	platform_set_drvdata(pdev, ndev);

	err = register_netdev(ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		goto err_mdio_remove;
	}

	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		   (unsigned long)ag->mac_base, ndev->irq,
		   phy_modes(ag->phy_if_mode));

	return 0;

err_mdio_remove:
	ag71xx_mdio_remove(ag);
err_put_clk:
	clk_disable_unprepare(ag->clk_eth);
	return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!ndev)
		return 0;

	ag = netdev_priv(ndev);
	unregister_netdev(ndev);
	ag71xx_mdio_remove(ag);
	clk_disable_unprepare(ag->clk_eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

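/* Per-SoC FIFO_CFG1..FIFO_CFG3 values and device configurations referenced
 * by the OF match table below.
 */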
static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name		= "ag71xx",
		.of_match_table	= ag71xx_match,
	}
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");