1 // SPDX-License-Identifier: GPL-2.0 2 /** 3 * sni_ave.c - Socionext UniPhier AVE ethernet driver 4 * Copyright 2014 Panasonic Corporation 5 * Copyright 2015-2017 Socionext Inc. 6 */ 7 8 #include <linux/bitops.h> 9 #include <linux/clk.h> 10 #include <linux/etherdevice.h> 11 #include <linux/interrupt.h> 12 #include <linux/io.h> 13 #include <linux/iopoll.h> 14 #include <linux/mfd/syscon.h> 15 #include <linux/mii.h> 16 #include <linux/module.h> 17 #include <linux/netdevice.h> 18 #include <linux/of_net.h> 19 #include <linux/of_mdio.h> 20 #include <linux/of_platform.h> 21 #include <linux/phy.h> 22 #include <linux/regmap.h> 23 #include <linux/reset.h> 24 #include <linux/types.h> 25 #include <linux/u64_stats_sync.h> 26 27 /* General Register Group */ 28 #define AVE_IDR 0x000 /* ID */ 29 #define AVE_VR 0x004 /* Version */ 30 #define AVE_GRR 0x008 /* Global Reset */ 31 #define AVE_CFGR 0x00c /* Configuration */ 32 33 /* Interrupt Register Group */ 34 #define AVE_GIMR 0x100 /* Global Interrupt Mask */ 35 #define AVE_GISR 0x104 /* Global Interrupt Status */ 36 37 /* MAC Register Group */ 38 #define AVE_TXCR 0x200 /* TX Setup */ 39 #define AVE_RXCR 0x204 /* RX Setup */ 40 #define AVE_RXMAC1R 0x208 /* MAC address (lower) */ 41 #define AVE_RXMAC2R 0x20c /* MAC address (upper) */ 42 #define AVE_MDIOCTR 0x214 /* MDIO Control */ 43 #define AVE_MDIOAR 0x218 /* MDIO Address */ 44 #define AVE_MDIOWDR 0x21c /* MDIO Data */ 45 #define AVE_MDIOSR 0x220 /* MDIO Status */ 46 #define AVE_MDIORDR 0x224 /* MDIO Rd Data */ 47 48 /* Descriptor Control Register Group */ 49 #define AVE_DESCC 0x300 /* Descriptor Control */ 50 #define AVE_TXDC 0x304 /* TX Descriptor Configuration */ 51 #define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */ 52 #define AVE_IIRQC 0x34c /* Interval IRQ Control */ 53 54 /* Packet Filter Register Group */ 55 #define AVE_PKTF_BASE 0x800 /* PF Base Address */ 56 #define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */ 57 #define AVE_PFMBIT_BASE 0xe00 /* PF 
Mask Bit Base Address */ 58 #define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */ 59 #define AVE_PFEN 0xffc /* Packet Filter Enable */ 60 #define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40) 61 #define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8) 62 #define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4) 63 #define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4) 64 65 /* 64bit descriptor memory */ 66 #define AVE_DESC_SIZE_64 12 /* Descriptor Size */ 67 68 #define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */ 69 #define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */ 70 71 #define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */ 72 #define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */ 73 74 /* 32bit descriptor memory */ 75 #define AVE_DESC_SIZE_32 8 /* Descriptor Size */ 76 77 #define AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */ 78 #define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */ 79 80 #define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */ 81 #define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */ 82 83 /* RMII Bridge Register Group */ 84 #define AVE_RSTCTRL 0x8028 /* Reset control */ 85 #define AVE_RSTCTRL_RMIIRST BIT(16) 86 #define AVE_LINKSEL 0x8034 /* Link speed setting */ 87 #define AVE_LINKSEL_100M BIT(0) 88 89 /* AVE_GRR */ 90 #define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */ 91 #define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */ 92 #define AVE_GRR_GRST BIT(0) /* Reset all MAC */ 93 94 /* AVE_CFGR */ 95 #define AVE_CFGR_FLE BIT(31) /* Filter Function */ 96 #define AVE_CFGR_CHE BIT(30) /* Checksum Function */ 97 #define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */ 98 #define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */ 99 100 /* AVE_GISR (common with GIMR) */ 101 #define AVE_GI_PHY BIT(24) /* PHY interrupt */ 102 #define AVE_GI_TX BIT(16) /* Tx complete */ 103 #define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */ 104 #define AVE_GI_RXOVF BIT(7) /* Overflow at the 
RxFIFO */ 105 #define AVE_GI_RXDROP BIT(6) /* Drop packet */ 106 #define AVE_GI_RXIINT BIT(5) /* Interval interrupt */ 107 108 /* AVE_TXCR */ 109 #define AVE_TXCR_FLOCTR BIT(18) /* Flow control */ 110 #define AVE_TXCR_TXSPD_1G BIT(17) 111 #define AVE_TXCR_TXSPD_100 BIT(16) 112 113 /* AVE_RXCR */ 114 #define AVE_RXCR_RXEN BIT(30) /* Rx enable */ 115 #define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */ 116 #define AVE_RXCR_FLOCTR BIT(21) /* Flow control */ 117 #define AVE_RXCR_AFEN BIT(19) /* MAC address filter */ 118 #define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */ 119 #define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0) 120 121 /* AVE_MDIOCTR */ 122 #define AVE_MDIOCTR_RREQ BIT(3) /* Read request */ 123 #define AVE_MDIOCTR_WREQ BIT(2) /* Write request */ 124 125 /* AVE_MDIOSR */ 126 #define AVE_MDIOSR_STS BIT(0) /* access status */ 127 128 /* AVE_DESCC */ 129 #define AVE_DESCC_STATUS_MASK GENMASK(31, 16) 130 #define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */ 131 #define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */ 132 #define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */ 133 134 /* AVE_TXDC */ 135 #define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */ 136 #define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */ 137 #define AVE_TXDC_ADDR_START 0 138 139 /* AVE_RXDC0 */ 140 #define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */ 141 #define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */ 142 #define AVE_RXDC0_ADDR_START 0 143 144 /* AVE_IIRQC */ 145 #define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */ 146 #define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */ 147 148 /* Command status for descriptor */ 149 #define AVE_STS_OWN BIT(31) /* Descriptor ownership */ 150 #define AVE_STS_INTR BIT(29) /* Request for interrupt */ 151 #define AVE_STS_OK BIT(27) /* Normal transmit */ 152 /* TX */ 153 #define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */ 154 #define AVE_STS_1ST BIT(26) /* Head of buffer chain */ 155 
#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */ 156 #define AVE_STS_OWC BIT(21) /* Out of window,Late Collision */ 157 #define AVE_STS_EC BIT(20) /* Excess collision occurred */ 158 #define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0) 159 /* RX */ 160 #define AVE_STS_CSSV BIT(21) /* Checksum check performed */ 161 #define AVE_STS_CSER BIT(20) /* Checksum error detected */ 162 #define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0) 163 164 /* Packet filter */ 165 #define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0)) 166 #define AVE_PFMBYTE_MASK1 GENMASK(25, 0) 167 #define AVE_PFMBIT_MASK GENMASK(15, 0) 168 169 #define AVE_PF_SIZE 17 /* Number of all packet filter */ 170 #define AVE_PF_MULTICAST_SIZE 7 /* Number of multicast filter */ 171 172 #define AVE_PFNUM_FILTER 0 /* No.0 */ 173 #define AVE_PFNUM_UNICAST 1 /* No.1 */ 174 #define AVE_PFNUM_BROADCAST 2 /* No.2 */ 175 #define AVE_PFNUM_MULTICAST 11 /* No.11-17 */ 176 177 /* NETIF Message control */ 178 #define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ 179 NETIF_MSG_PROBE | \ 180 NETIF_MSG_LINK | \ 181 NETIF_MSG_TIMER | \ 182 NETIF_MSG_IFDOWN | \ 183 NETIF_MSG_IFUP | \ 184 NETIF_MSG_RX_ERR | \ 185 NETIF_MSG_TX_ERR) 186 187 /* Parameter for descriptor */ 188 #define AVE_NR_TXDESC 32 /* Tx descriptor */ 189 #define AVE_NR_RXDESC 64 /* Rx descriptor */ 190 191 #define AVE_DESC_OFS_CMDSTS 0 192 #define AVE_DESC_OFS_ADDRL 4 193 #define AVE_DESC_OFS_ADDRU 8 194 195 /* Parameter for ethernet frame */ 196 #define AVE_MAX_ETHFRAME 1518 197 198 /* Parameter for interrupt */ 199 #define AVE_INTM_COUNT 20 200 #define AVE_FORCE_TXINTCNT 1 201 202 /* SG */ 203 #define SG_ETPINMODE 0x540 204 #define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */ 205 #define SG_ETPINMODE_RMII(ins) BIT(ins) 206 207 #define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit) 208 209 #define AVE_MAX_CLKS 4 210 #define AVE_MAX_RSTS 2 211 212 enum desc_id { 213 AVE_DESCID_RX, 214 AVE_DESCID_TX, 215 }; 216 217 enum desc_state { 218 AVE_DESC_RX_PERMIT, 219 
AVE_DESC_RX_SUSPEND, 220 AVE_DESC_START, 221 AVE_DESC_STOP, 222 }; 223 224 struct ave_desc { 225 struct sk_buff *skbs; 226 dma_addr_t skbs_dma; 227 size_t skbs_dmalen; 228 }; 229 230 struct ave_desc_info { 231 u32 ndesc; /* number of descriptor */ 232 u32 daddr; /* start address of descriptor */ 233 u32 proc_idx; /* index of processing packet */ 234 u32 done_idx; /* index of processed packet */ 235 struct ave_desc *desc; /* skb info related descriptor */ 236 }; 237 238 struct ave_stats { 239 struct u64_stats_sync syncp; 240 u64 packets; 241 u64 bytes; 242 u64 errors; 243 u64 dropped; 244 u64 collisions; 245 u64 fifo_errors; 246 }; 247 248 struct ave_private { 249 void __iomem *base; 250 int irq; 251 int phy_id; 252 unsigned int desc_size; 253 u32 msg_enable; 254 int nclks; 255 struct clk *clk[AVE_MAX_CLKS]; 256 int nrsts; 257 struct reset_control *rst[AVE_MAX_RSTS]; 258 phy_interface_t phy_mode; 259 struct phy_device *phydev; 260 struct mii_bus *mdio; 261 struct regmap *regmap; 262 unsigned int pinmode_mask; 263 unsigned int pinmode_val; 264 u32 wolopts; 265 266 /* stats */ 267 struct ave_stats stats_rx; 268 struct ave_stats stats_tx; 269 270 /* NAPI support */ 271 struct net_device *ndev; 272 struct napi_struct napi_rx; 273 struct napi_struct napi_tx; 274 275 /* descriptor */ 276 struct ave_desc_info rx; 277 struct ave_desc_info tx; 278 279 /* flow control */ 280 int pause_auto; 281 int pause_rx; 282 int pause_tx; 283 284 const struct ave_soc_data *data; 285 }; 286 287 struct ave_soc_data { 288 bool is_desc_64bit; 289 const char *clock_names[AVE_MAX_CLKS]; 290 const char *reset_names[AVE_MAX_RSTS]; 291 int (*get_pinmode)(struct ave_private *priv, 292 phy_interface_t phy_mode, u32 arg); 293 }; 294 295 static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry, 296 int offset) 297 { 298 struct ave_private *priv = netdev_priv(ndev); 299 u32 addr; 300 301 addr = ((id == AVE_DESCID_TX) ? 
priv->tx.daddr : priv->rx.daddr) 302 + entry * priv->desc_size + offset; 303 304 return readl(priv->base + addr); 305 } 306 307 static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id, 308 int entry) 309 { 310 return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS); 311 } 312 313 static void ave_desc_write(struct net_device *ndev, enum desc_id id, 314 int entry, int offset, u32 val) 315 { 316 struct ave_private *priv = netdev_priv(ndev); 317 u32 addr; 318 319 addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr) 320 + entry * priv->desc_size + offset; 321 322 writel(val, priv->base + addr); 323 } 324 325 static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id, 326 int entry, u32 val) 327 { 328 ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val); 329 } 330 331 static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id, 332 int entry, dma_addr_t paddr) 333 { 334 struct ave_private *priv = netdev_priv(ndev); 335 336 ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL, 337 lower_32_bits(paddr)); 338 if (IS_DESC_64BIT(priv)) 339 ave_desc_write(ndev, id, 340 entry, AVE_DESC_OFS_ADDRU, 341 upper_32_bits(paddr)); 342 } 343 344 static u32 ave_irq_disable_all(struct net_device *ndev) 345 { 346 struct ave_private *priv = netdev_priv(ndev); 347 u32 ret; 348 349 ret = readl(priv->base + AVE_GIMR); 350 writel(0, priv->base + AVE_GIMR); 351 352 return ret; 353 } 354 355 static void ave_irq_restore(struct net_device *ndev, u32 val) 356 { 357 struct ave_private *priv = netdev_priv(ndev); 358 359 writel(val, priv->base + AVE_GIMR); 360 } 361 362 static void ave_irq_enable(struct net_device *ndev, u32 bitflag) 363 { 364 struct ave_private *priv = netdev_priv(ndev); 365 366 writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR); 367 writel(bitflag, priv->base + AVE_GISR); 368 } 369 370 static void ave_hw_write_macaddr(struct net_device *ndev, 371 const unsigned char *mac_addr, 372 int reg1, int reg2) 
373 { 374 struct ave_private *priv = netdev_priv(ndev); 375 376 writel(mac_addr[0] | mac_addr[1] << 8 | 377 mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1); 378 writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2); 379 } 380 381 static void ave_hw_read_version(struct net_device *ndev, char *buf, int len) 382 { 383 struct ave_private *priv = netdev_priv(ndev); 384 u32 major, minor, vr; 385 386 vr = readl(priv->base + AVE_VR); 387 major = (vr & GENMASK(15, 8)) >> 8; 388 minor = (vr & GENMASK(7, 0)); 389 snprintf(buf, len, "v%u.%u", major, minor); 390 } 391 392 static void ave_ethtool_get_drvinfo(struct net_device *ndev, 393 struct ethtool_drvinfo *info) 394 { 395 struct device *dev = ndev->dev.parent; 396 397 strlcpy(info->driver, dev->driver->name, sizeof(info->driver)); 398 strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info)); 399 ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version)); 400 } 401 402 static u32 ave_ethtool_get_msglevel(struct net_device *ndev) 403 { 404 struct ave_private *priv = netdev_priv(ndev); 405 406 return priv->msg_enable; 407 } 408 409 static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val) 410 { 411 struct ave_private *priv = netdev_priv(ndev); 412 413 priv->msg_enable = val; 414 } 415 416 static void ave_ethtool_get_wol(struct net_device *ndev, 417 struct ethtool_wolinfo *wol) 418 { 419 wol->supported = 0; 420 wol->wolopts = 0; 421 422 if (ndev->phydev) 423 phy_ethtool_get_wol(ndev->phydev, wol); 424 } 425 426 static int ave_ethtool_set_wol(struct net_device *ndev, 427 struct ethtool_wolinfo *wol) 428 { 429 int ret; 430 431 if (!ndev->phydev || 432 (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) 433 return -EOPNOTSUPP; 434 435 ret = phy_ethtool_set_wol(ndev->phydev, wol); 436 if (!ret) 437 device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); 438 439 return ret; 440 } 441 442 static void ave_ethtool_get_pauseparam(struct net_device *ndev, 443 struct ethtool_pauseparam *pause) 444 { 
445 struct ave_private *priv = netdev_priv(ndev); 446 447 pause->autoneg = priv->pause_auto; 448 pause->rx_pause = priv->pause_rx; 449 pause->tx_pause = priv->pause_tx; 450 } 451 452 static int ave_ethtool_set_pauseparam(struct net_device *ndev, 453 struct ethtool_pauseparam *pause) 454 { 455 struct ave_private *priv = netdev_priv(ndev); 456 struct phy_device *phydev = ndev->phydev; 457 458 if (!phydev) 459 return -EINVAL; 460 461 priv->pause_auto = pause->autoneg; 462 priv->pause_rx = pause->rx_pause; 463 priv->pause_tx = pause->tx_pause; 464 465 phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); 466 467 return 0; 468 } 469 470 static const struct ethtool_ops ave_ethtool_ops = { 471 .get_link_ksettings = phy_ethtool_get_link_ksettings, 472 .set_link_ksettings = phy_ethtool_set_link_ksettings, 473 .get_drvinfo = ave_ethtool_get_drvinfo, 474 .nway_reset = phy_ethtool_nway_reset, 475 .get_link = ethtool_op_get_link, 476 .get_msglevel = ave_ethtool_get_msglevel, 477 .set_msglevel = ave_ethtool_set_msglevel, 478 .get_wol = ave_ethtool_get_wol, 479 .set_wol = ave_ethtool_set_wol, 480 .get_pauseparam = ave_ethtool_get_pauseparam, 481 .set_pauseparam = ave_ethtool_set_pauseparam, 482 }; 483 484 static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum) 485 { 486 struct net_device *ndev = bus->priv; 487 struct ave_private *priv; 488 u32 mdioctl, mdiosr; 489 int ret; 490 491 priv = netdev_priv(ndev); 492 493 /* write address */ 494 writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); 495 496 /* read request */ 497 mdioctl = readl(priv->base + AVE_MDIOCTR); 498 writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ, 499 priv->base + AVE_MDIOCTR); 500 501 ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, 502 !(mdiosr & AVE_MDIOSR_STS), 20, 2000); 503 if (ret) { 504 netdev_err(ndev, "failed to read (phy:%d reg:%x)\n", 505 phyid, regnum); 506 return ret; 507 } 508 509 return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0); 510 } 511 512 
static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum, 513 u16 val) 514 { 515 struct net_device *ndev = bus->priv; 516 struct ave_private *priv; 517 u32 mdioctl, mdiosr; 518 int ret; 519 520 priv = netdev_priv(ndev); 521 522 /* write address */ 523 writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); 524 525 /* write data */ 526 writel(val, priv->base + AVE_MDIOWDR); 527 528 /* write request */ 529 mdioctl = readl(priv->base + AVE_MDIOCTR); 530 writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ, 531 priv->base + AVE_MDIOCTR); 532 533 ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, 534 !(mdiosr & AVE_MDIOSR_STS), 20, 2000); 535 if (ret) 536 netdev_err(ndev, "failed to write (phy:%d reg:%x)\n", 537 phyid, regnum); 538 539 return ret; 540 } 541 542 static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc, 543 void *ptr, size_t len, enum dma_data_direction dir, 544 dma_addr_t *paddr) 545 { 546 dma_addr_t map_addr; 547 548 map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir); 549 if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr))) 550 return -ENOMEM; 551 552 desc->skbs_dma = map_addr; 553 desc->skbs_dmalen = len; 554 *paddr = map_addr; 555 556 return 0; 557 } 558 559 static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc, 560 enum dma_data_direction dir) 561 { 562 if (!desc->skbs_dma) 563 return; 564 565 dma_unmap_single(ndev->dev.parent, 566 desc->skbs_dma, desc->skbs_dmalen, dir); 567 desc->skbs_dma = 0; 568 } 569 570 /* Prepare Rx descriptor and memory */ 571 static int ave_rxdesc_prepare(struct net_device *ndev, int entry) 572 { 573 struct ave_private *priv = netdev_priv(ndev); 574 struct sk_buff *skb; 575 dma_addr_t paddr; 576 int ret; 577 578 skb = priv->rx.desc[entry].skbs; 579 if (!skb) { 580 skb = netdev_alloc_skb_ip_align(ndev, 581 AVE_MAX_ETHFRAME); 582 if (!skb) { 583 netdev_err(ndev, "can't allocate skb for Rx\n"); 584 return -ENOMEM; 585 } 586 } 587 588 /* set disable to 
cmdsts */ 589 ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, 590 AVE_STS_INTR | AVE_STS_OWN); 591 592 /* map Rx buffer 593 * Rx buffer set to the Rx descriptor has two restrictions: 594 * - Rx buffer address is 4 byte aligned. 595 * - Rx buffer begins with 2 byte headroom, and data will be put from 596 * (buffer + 2). 597 * To satisfy this, specify the address to put back the buffer 598 * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(), 599 * and expand the map size by NET_IP_ALIGN. 600 */ 601 ret = ave_dma_map(ndev, &priv->rx.desc[entry], 602 skb->data - NET_IP_ALIGN, 603 AVE_MAX_ETHFRAME + NET_IP_ALIGN, 604 DMA_FROM_DEVICE, &paddr); 605 if (ret) { 606 netdev_err(ndev, "can't map skb for Rx\n"); 607 dev_kfree_skb_any(skb); 608 return ret; 609 } 610 priv->rx.desc[entry].skbs = skb; 611 612 /* set buffer pointer */ 613 ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr); 614 615 /* set enable to cmdsts */ 616 ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, 617 AVE_STS_INTR | AVE_MAX_ETHFRAME); 618 619 return ret; 620 } 621 622 /* Switch state of descriptor */ 623 static int ave_desc_switch(struct net_device *ndev, enum desc_state state) 624 { 625 struct ave_private *priv = netdev_priv(ndev); 626 int ret = 0; 627 u32 val; 628 629 switch (state) { 630 case AVE_DESC_START: 631 writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC); 632 break; 633 634 case AVE_DESC_STOP: 635 writel(0, priv->base + AVE_DESCC); 636 if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val, 637 150, 15000)) { 638 netdev_err(ndev, "can't stop descriptor\n"); 639 ret = -EBUSY; 640 } 641 break; 642 643 case AVE_DESC_RX_SUSPEND: 644 val = readl(priv->base + AVE_DESCC); 645 val |= AVE_DESCC_RDSTP; 646 val &= ~AVE_DESCC_STATUS_MASK; 647 writel(val, priv->base + AVE_DESCC); 648 if (readl_poll_timeout(priv->base + AVE_DESCC, val, 649 val & (AVE_DESCC_RDSTP << 16), 650 150, 150000)) { 651 netdev_err(ndev, "can't suspend descriptor\n"); 652 ret = -EBUSY; 653 } 654 break; 
655 656 case AVE_DESC_RX_PERMIT: 657 val = readl(priv->base + AVE_DESCC); 658 val &= ~AVE_DESCC_RDSTP; 659 val &= ~AVE_DESCC_STATUS_MASK; 660 writel(val, priv->base + AVE_DESCC); 661 break; 662 663 default: 664 ret = -EINVAL; 665 break; 666 } 667 668 return ret; 669 } 670 671 static int ave_tx_complete(struct net_device *ndev) 672 { 673 struct ave_private *priv = netdev_priv(ndev); 674 u32 proc_idx, done_idx, ndesc, cmdsts; 675 unsigned int nr_freebuf = 0; 676 unsigned int tx_packets = 0; 677 unsigned int tx_bytes = 0; 678 679 proc_idx = priv->tx.proc_idx; 680 done_idx = priv->tx.done_idx; 681 ndesc = priv->tx.ndesc; 682 683 /* free pre-stored skb from done_idx to proc_idx */ 684 while (proc_idx != done_idx) { 685 cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx); 686 687 /* do nothing if owner is HW (==1 for Tx) */ 688 if (cmdsts & AVE_STS_OWN) 689 break; 690 691 /* check Tx status and updates statistics */ 692 if (cmdsts & AVE_STS_OK) { 693 tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK; 694 /* success */ 695 if (cmdsts & AVE_STS_LAST) 696 tx_packets++; 697 } else { 698 /* error */ 699 if (cmdsts & AVE_STS_LAST) { 700 priv->stats_tx.errors++; 701 if (cmdsts & (AVE_STS_OWC | AVE_STS_EC)) 702 priv->stats_tx.collisions++; 703 } 704 } 705 706 /* release skb */ 707 if (priv->tx.desc[done_idx].skbs) { 708 ave_dma_unmap(ndev, &priv->tx.desc[done_idx], 709 DMA_TO_DEVICE); 710 dev_consume_skb_any(priv->tx.desc[done_idx].skbs); 711 priv->tx.desc[done_idx].skbs = NULL; 712 nr_freebuf++; 713 } 714 done_idx = (done_idx + 1) % ndesc; 715 } 716 717 priv->tx.done_idx = done_idx; 718 719 /* update stats */ 720 u64_stats_update_begin(&priv->stats_tx.syncp); 721 priv->stats_tx.packets += tx_packets; 722 priv->stats_tx.bytes += tx_bytes; 723 u64_stats_update_end(&priv->stats_tx.syncp); 724 725 /* wake queue for freeing buffer */ 726 if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf) 727 netif_wake_queue(ndev); 728 729 return nr_freebuf; 730 } 731 732 static int 
ave_rx_receive(struct net_device *ndev, int num) 733 { 734 struct ave_private *priv = netdev_priv(ndev); 735 unsigned int rx_packets = 0; 736 unsigned int rx_bytes = 0; 737 u32 proc_idx, done_idx; 738 struct sk_buff *skb; 739 unsigned int pktlen; 740 int restpkt, npkts; 741 u32 ndesc, cmdsts; 742 743 proc_idx = priv->rx.proc_idx; 744 done_idx = priv->rx.done_idx; 745 ndesc = priv->rx.ndesc; 746 restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc; 747 748 for (npkts = 0; npkts < num; npkts++) { 749 /* we can't receive more packet, so fill desc quickly */ 750 if (--restpkt < 0) 751 break; 752 753 cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx); 754 755 /* do nothing if owner is HW (==0 for Rx) */ 756 if (!(cmdsts & AVE_STS_OWN)) 757 break; 758 759 if (!(cmdsts & AVE_STS_OK)) { 760 priv->stats_rx.errors++; 761 proc_idx = (proc_idx + 1) % ndesc; 762 continue; 763 } 764 765 pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK; 766 767 /* get skbuff for rx */ 768 skb = priv->rx.desc[proc_idx].skbs; 769 priv->rx.desc[proc_idx].skbs = NULL; 770 771 ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE); 772 773 skb->dev = ndev; 774 skb_put(skb, pktlen); 775 skb->protocol = eth_type_trans(skb, ndev); 776 777 if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER))) 778 skb->ip_summed = CHECKSUM_UNNECESSARY; 779 780 rx_packets++; 781 rx_bytes += pktlen; 782 783 netif_receive_skb(skb); 784 785 proc_idx = (proc_idx + 1) % ndesc; 786 } 787 788 priv->rx.proc_idx = proc_idx; 789 790 /* update stats */ 791 u64_stats_update_begin(&priv->stats_rx.syncp); 792 priv->stats_rx.packets += rx_packets; 793 priv->stats_rx.bytes += rx_bytes; 794 u64_stats_update_end(&priv->stats_rx.syncp); 795 796 /* refill the Rx buffers */ 797 while (proc_idx != done_idx) { 798 if (ave_rxdesc_prepare(ndev, done_idx)) 799 break; 800 done_idx = (done_idx + 1) % ndesc; 801 } 802 803 priv->rx.done_idx = done_idx; 804 805 return npkts; 806 } 807 808 static int ave_napi_poll_rx(struct napi_struct 
*napi, int budget) 809 { 810 struct ave_private *priv; 811 struct net_device *ndev; 812 int num; 813 814 priv = container_of(napi, struct ave_private, napi_rx); 815 ndev = priv->ndev; 816 817 num = ave_rx_receive(ndev, budget); 818 if (num < budget) { 819 napi_complete_done(napi, num); 820 821 /* enable Rx interrupt when NAPI finishes */ 822 ave_irq_enable(ndev, AVE_GI_RXIINT); 823 } 824 825 return num; 826 } 827 828 static int ave_napi_poll_tx(struct napi_struct *napi, int budget) 829 { 830 struct ave_private *priv; 831 struct net_device *ndev; 832 int num; 833 834 priv = container_of(napi, struct ave_private, napi_tx); 835 ndev = priv->ndev; 836 837 num = ave_tx_complete(ndev); 838 napi_complete(napi); 839 840 /* enable Tx interrupt when NAPI finishes */ 841 ave_irq_enable(ndev, AVE_GI_TX); 842 843 return num; 844 } 845 846 static void ave_global_reset(struct net_device *ndev) 847 { 848 struct ave_private *priv = netdev_priv(ndev); 849 u32 val; 850 851 /* set config register */ 852 val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE; 853 if (!phy_interface_mode_is_rgmii(priv->phy_mode)) 854 val |= AVE_CFGR_MII; 855 writel(val, priv->base + AVE_CFGR); 856 857 /* reset RMII register */ 858 val = readl(priv->base + AVE_RSTCTRL); 859 val &= ~AVE_RSTCTRL_RMIIRST; 860 writel(val, priv->base + AVE_RSTCTRL); 861 862 /* assert reset */ 863 writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR); 864 msleep(20); 865 866 /* 1st, negate PHY reset only */ 867 writel(AVE_GRR_GRST, priv->base + AVE_GRR); 868 msleep(40); 869 870 /* negate reset */ 871 writel(0, priv->base + AVE_GRR); 872 msleep(40); 873 874 /* negate RMII register */ 875 val = readl(priv->base + AVE_RSTCTRL); 876 val |= AVE_RSTCTRL_RMIIRST; 877 writel(val, priv->base + AVE_RSTCTRL); 878 879 ave_irq_disable_all(ndev); 880 } 881 882 static void ave_rxfifo_reset(struct net_device *ndev) 883 { 884 struct ave_private *priv = netdev_priv(ndev); 885 u32 rxcr_org; 886 887 /* save and disable MAC receive op */ 888 
rxcr_org = readl(priv->base + AVE_RXCR); 889 writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR); 890 891 /* suspend Rx descriptor */ 892 ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND); 893 894 /* receive all packets before descriptor starts */ 895 ave_rx_receive(ndev, priv->rx.ndesc); 896 897 /* assert reset */ 898 writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); 899 udelay(50); 900 901 /* negate reset */ 902 writel(0, priv->base + AVE_GRR); 903 udelay(20); 904 905 /* negate interrupt status */ 906 writel(AVE_GI_RXOVF, priv->base + AVE_GISR); 907 908 /* permit descriptor */ 909 ave_desc_switch(ndev, AVE_DESC_RX_PERMIT); 910 911 /* restore MAC reccieve op */ 912 writel(rxcr_org, priv->base + AVE_RXCR); 913 } 914 915 static irqreturn_t ave_irq_handler(int irq, void *netdev) 916 { 917 struct net_device *ndev = (struct net_device *)netdev; 918 struct ave_private *priv = netdev_priv(ndev); 919 u32 gimr_val, gisr_val; 920 921 gimr_val = ave_irq_disable_all(ndev); 922 923 /* get interrupt status */ 924 gisr_val = readl(priv->base + AVE_GISR); 925 926 /* PHY */ 927 if (gisr_val & AVE_GI_PHY) 928 writel(AVE_GI_PHY, priv->base + AVE_GISR); 929 930 /* check exceeding packet */ 931 if (gisr_val & AVE_GI_RXERR) { 932 writel(AVE_GI_RXERR, priv->base + AVE_GISR); 933 netdev_err(ndev, "receive a packet exceeding frame buffer\n"); 934 } 935 936 gisr_val &= gimr_val; 937 if (!gisr_val) 938 goto exit_isr; 939 940 /* RxFIFO overflow */ 941 if (gisr_val & AVE_GI_RXOVF) { 942 priv->stats_rx.fifo_errors++; 943 ave_rxfifo_reset(ndev); 944 goto exit_isr; 945 } 946 947 /* Rx drop */ 948 if (gisr_val & AVE_GI_RXDROP) { 949 priv->stats_rx.dropped++; 950 writel(AVE_GI_RXDROP, priv->base + AVE_GISR); 951 } 952 953 /* Rx interval */ 954 if (gisr_val & AVE_GI_RXIINT) { 955 napi_schedule(&priv->napi_rx); 956 /* still force to disable Rx interrupt until NAPI finishes */ 957 gimr_val &= ~AVE_GI_RXIINT; 958 } 959 960 /* Tx completed */ 961 if (gisr_val & AVE_GI_TX) { 962 
napi_schedule(&priv->napi_tx); 963 /* still force to disable Tx interrupt until NAPI finishes */ 964 gimr_val &= ~AVE_GI_TX; 965 } 966 967 exit_isr: 968 ave_irq_restore(ndev, gimr_val); 969 970 return IRQ_HANDLED; 971 } 972 973 static int ave_pfsel_start(struct net_device *ndev, unsigned int entry) 974 { 975 struct ave_private *priv = netdev_priv(ndev); 976 u32 val; 977 978 if (WARN_ON(entry > AVE_PF_SIZE)) 979 return -EINVAL; 980 981 val = readl(priv->base + AVE_PFEN); 982 writel(val | BIT(entry), priv->base + AVE_PFEN); 983 984 return 0; 985 } 986 987 static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry) 988 { 989 struct ave_private *priv = netdev_priv(ndev); 990 u32 val; 991 992 if (WARN_ON(entry > AVE_PF_SIZE)) 993 return -EINVAL; 994 995 val = readl(priv->base + AVE_PFEN); 996 writel(val & ~BIT(entry), priv->base + AVE_PFEN); 997 998 return 0; 999 } 1000 1001 static int ave_pfsel_set_macaddr(struct net_device *ndev, 1002 unsigned int entry, 1003 const unsigned char *mac_addr, 1004 unsigned int set_size) 1005 { 1006 struct ave_private *priv = netdev_priv(ndev); 1007 1008 if (WARN_ON(entry > AVE_PF_SIZE)) 1009 return -EINVAL; 1010 if (WARN_ON(set_size > 6)) 1011 return -EINVAL; 1012 1013 ave_pfsel_stop(ndev, entry); 1014 1015 /* set MAC address for the filter */ 1016 ave_hw_write_macaddr(ndev, mac_addr, 1017 AVE_PKTF(entry), AVE_PKTF(entry) + 4); 1018 1019 /* set byte mask */ 1020 writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0, 1021 priv->base + AVE_PFMBYTE(entry)); 1022 writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); 1023 1024 /* set bit mask filter */ 1025 writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); 1026 1027 /* set selector to ring 0 */ 1028 writel(0, priv->base + AVE_PFSEL(entry)); 1029 1030 /* restart filter */ 1031 ave_pfsel_start(ndev, entry); 1032 1033 return 0; 1034 } 1035 1036 static void ave_pfsel_set_promisc(struct net_device *ndev, 1037 unsigned int entry, u32 rxring) 1038 { 1039 struct ave_private 
*priv = netdev_priv(ndev); 1040 1041 if (WARN_ON(entry > AVE_PF_SIZE)) 1042 return; 1043 1044 ave_pfsel_stop(ndev, entry); 1045 1046 /* set byte mask */ 1047 writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry)); 1048 writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); 1049 1050 /* set bit mask filter */ 1051 writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); 1052 1053 /* set selector to rxring */ 1054 writel(rxring, priv->base + AVE_PFSEL(entry)); 1055 1056 ave_pfsel_start(ndev, entry); 1057 } 1058 1059 static void ave_pfsel_init(struct net_device *ndev) 1060 { 1061 unsigned char bcast_mac[ETH_ALEN]; 1062 int i; 1063 1064 eth_broadcast_addr(bcast_mac); 1065 1066 for (i = 0; i < AVE_PF_SIZE; i++) 1067 ave_pfsel_stop(ndev, i); 1068 1069 /* promiscious entry, select ring 0 */ 1070 ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0); 1071 1072 /* unicast entry */ 1073 ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); 1074 1075 /* broadcast entry */ 1076 ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6); 1077 } 1078 1079 static void ave_phy_adjust_link(struct net_device *ndev) 1080 { 1081 struct ave_private *priv = netdev_priv(ndev); 1082 struct phy_device *phydev = ndev->phydev; 1083 u32 val, txcr, rxcr, rxcr_org; 1084 u16 rmt_adv = 0, lcl_adv = 0; 1085 u8 cap; 1086 1087 /* set RGMII speed */ 1088 val = readl(priv->base + AVE_TXCR); 1089 val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G); 1090 1091 if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000) 1092 val |= AVE_TXCR_TXSPD_1G; 1093 else if (phydev->speed == SPEED_100) 1094 val |= AVE_TXCR_TXSPD_100; 1095 1096 writel(val, priv->base + AVE_TXCR); 1097 1098 /* set RMII speed (100M/10M only) */ 1099 if (!phy_interface_is_rgmii(phydev)) { 1100 val = readl(priv->base + AVE_LINKSEL); 1101 if (phydev->speed == SPEED_10) 1102 val &= ~AVE_LINKSEL_100M; 1103 else 1104 val |= AVE_LINKSEL_100M; 1105 writel(val, priv->base + AVE_LINKSEL); 1106 } 1107 1108 /* check 
current RXCR/TXCR */ 1109 rxcr = readl(priv->base + AVE_RXCR); 1110 txcr = readl(priv->base + AVE_TXCR); 1111 rxcr_org = rxcr; 1112 1113 if (phydev->duplex) { 1114 rxcr |= AVE_RXCR_FDUPEN; 1115 1116 if (phydev->pause) 1117 rmt_adv |= LPA_PAUSE_CAP; 1118 if (phydev->asym_pause) 1119 rmt_adv |= LPA_PAUSE_ASYM; 1120 1121 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); 1122 cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 1123 if (cap & FLOW_CTRL_TX) 1124 txcr |= AVE_TXCR_FLOCTR; 1125 else 1126 txcr &= ~AVE_TXCR_FLOCTR; 1127 if (cap & FLOW_CTRL_RX) 1128 rxcr |= AVE_RXCR_FLOCTR; 1129 else 1130 rxcr &= ~AVE_RXCR_FLOCTR; 1131 } else { 1132 rxcr &= ~AVE_RXCR_FDUPEN; 1133 rxcr &= ~AVE_RXCR_FLOCTR; 1134 txcr &= ~AVE_TXCR_FLOCTR; 1135 } 1136 1137 if (rxcr_org != rxcr) { 1138 /* disable Rx mac */ 1139 writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR); 1140 /* change and enable TX/Rx mac */ 1141 writel(txcr, priv->base + AVE_TXCR); 1142 writel(rxcr, priv->base + AVE_RXCR); 1143 } 1144 1145 phy_print_status(phydev); 1146 } 1147 1148 static void ave_macaddr_init(struct net_device *ndev) 1149 { 1150 ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R); 1151 1152 /* pfsel unicast entry */ 1153 ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); 1154 } 1155 1156 static int ave_init(struct net_device *ndev) 1157 { 1158 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1159 struct ave_private *priv = netdev_priv(ndev); 1160 struct device *dev = ndev->dev.parent; 1161 struct device_node *np = dev->of_node; 1162 struct device_node *mdio_np; 1163 struct phy_device *phydev; 1164 int nc, nr, ret; 1165 1166 /* enable clk because of hw access until ndo_open */ 1167 for (nc = 0; nc < priv->nclks; nc++) { 1168 ret = clk_prepare_enable(priv->clk[nc]); 1169 if (ret) { 1170 dev_err(dev, "can't enable clock\n"); 1171 goto out_clk_disable; 1172 } 1173 } 1174 1175 for (nr = 0; nr < priv->nrsts; nr++) { 1176 ret = reset_control_deassert(priv->rst[nr]); 
1177 if (ret) { 1178 dev_err(dev, "can't deassert reset\n"); 1179 goto out_reset_assert; 1180 } 1181 } 1182 1183 ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, 1184 priv->pinmode_mask, priv->pinmode_val); 1185 if (ret) 1186 return ret; 1187 1188 ave_global_reset(ndev); 1189 1190 mdio_np = of_get_child_by_name(np, "mdio"); 1191 if (!mdio_np) { 1192 dev_err(dev, "mdio node not found\n"); 1193 ret = -EINVAL; 1194 goto out_reset_assert; 1195 } 1196 ret = of_mdiobus_register(priv->mdio, mdio_np); 1197 of_node_put(mdio_np); 1198 if (ret) { 1199 dev_err(dev, "failed to register mdiobus\n"); 1200 goto out_reset_assert; 1201 } 1202 1203 phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link); 1204 if (!phydev) { 1205 dev_err(dev, "could not attach to PHY\n"); 1206 ret = -ENODEV; 1207 goto out_mdio_unregister; 1208 } 1209 1210 priv->phydev = phydev; 1211 1212 ave_ethtool_get_wol(ndev, &wol); 1213 device_set_wakeup_capable(&ndev->dev, !!wol.supported); 1214 1215 /* set wol initial state disabled */ 1216 wol.wolopts = 0; 1217 ave_ethtool_set_wol(ndev, &wol); 1218 1219 if (!phy_interface_is_rgmii(phydev)) 1220 phy_set_max_speed(phydev, SPEED_100); 1221 1222 phy_support_asym_pause(phydev); 1223 1224 phy_attached_info(phydev); 1225 1226 return 0; 1227 1228 out_mdio_unregister: 1229 mdiobus_unregister(priv->mdio); 1230 out_reset_assert: 1231 while (--nr >= 0) 1232 reset_control_assert(priv->rst[nr]); 1233 out_clk_disable: 1234 while (--nc >= 0) 1235 clk_disable_unprepare(priv->clk[nc]); 1236 1237 return ret; 1238 } 1239 1240 static void ave_uninit(struct net_device *ndev) 1241 { 1242 struct ave_private *priv = netdev_priv(ndev); 1243 int i; 1244 1245 phy_disconnect(priv->phydev); 1246 mdiobus_unregister(priv->mdio); 1247 1248 /* disable clk because of hw access after ndo_stop */ 1249 for (i = 0; i < priv->nrsts; i++) 1250 reset_control_assert(priv->rst[i]); 1251 for (i = 0; i < priv->nclks; i++) 1252 clk_disable_unprepare(priv->clk[i]); 1253 } 1254 1255 static int 
ave_open(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;
	int ret;
	u32 val;

	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
			  ndev);
	if (ret)
		return ret;

	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
				GFP_KERNEL);
	if (!priv->tx.desc) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
				GFP_KERNEL);
	if (!priv->rx.desc) {
		kfree(priv->tx.desc);
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* initialize Tx work and descriptor */
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
	}
	/* ring size is programmed in the upper 16 bits, in bytes */
	writel(AVE_TXDC_ADDR_START |
	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
	       priv->base + AVE_TXDC);

	/* initialize Rx work and descriptor */
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (ave_rxdesc_prepare(ndev, entry))
			break;
	}
	writel(AVE_RXDC0_ADDR_START |
	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
	       priv->base + AVE_RXDC0);

	ave_desc_switch(ndev, AVE_DESC_START);

	ave_pfsel_init(ndev);
	ave_macaddr_init(ndev);

	/* set Rx configuration */
	/* full duplex, enable pause drop, enable flow control */
	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
	      AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
	writel(val, priv->base + AVE_RXCR);

	/* set Tx configuration */
	/* enable flow control, disable loopback */
	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);

	/* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
	writel(val, priv->base + AVE_IIRQC);

	/* unmask the interrupts this driver services */
	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
	ave_irq_restore(ndev, val);

	napi_enable(&priv->napi_rx);
	napi_enable(&priv->napi_tx);

	phy_start(ndev->phydev);
	phy_start_aneg(ndev->phydev);
	netif_start_queue(ndev);

	return 0;

out_free_irq:
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	return ret;
}

/* ndo_stop: quiesce irq/napi/phy, stop the descriptor engine, then
 * release all queued Tx/Rx buffers and the descriptor shadow arrays.
 */
static int ave_stop(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;

	ave_irq_disable_all(ndev);
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	netif_tx_disable(ndev);
	phy_stop(ndev->phydev);
	napi_disable(&priv->napi_tx);
	napi_disable(&priv->napi_rx);

	ave_desc_switch(ndev, AVE_DESC_STOP);

	/* free Tx buffer */
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		if (!priv->tx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
		priv->tx.desc[entry].skbs = NULL;
	}
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;

	/* free Rx buffer */
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (!priv->rx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
		priv->rx.desc[entry].skbs = NULL;
	}
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;

	kfree(priv->tx.desc);
	kfree(priv->rx.desc);

	return 0;
}

/* ndo_start_xmit: map the skb, fill one Tx descriptor and hand it to
 * the hardware by setting the OWN bit in cmdsts.
 */
static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	u32 proc_idx, done_idx, ndesc, cmdsts;
	int ret, freepkt;
	dma_addr_t
paddr;

	proc_idx = priv->tx.proc_idx;
	done_idx = priv->tx.done_idx;
	ndesc = priv->tx.ndesc;
	/* free Tx entries, keeping one slot in reserve */
	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;

	/* stop queue when not enough entry */
	if (unlikely(freepkt < 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* add padding for short packet */
	if (skb_put_padto(skb, ETH_ZLEN)) {
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	/* map Tx buffer
	 * Tx buffer set to the Tx descriptor doesn't have any restriction.
	 */
	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
	if (ret) {
		dev_kfree_skb_any(skb);
		priv->stats_tx.dropped++;
		return NETDEV_TX_OK;
	}

	priv->tx.desc[proc_idx].skbs = skb;

	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);

	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
		 (skb->len & AVE_STS_PKTLEN_TX_MASK);

	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
		cmdsts |= AVE_STS_INTR;

	/* disable checksum calculation when skb doesn't calculate checksum */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		cmdsts |= AVE_STS_NOCSUM;

	/* writing cmdsts (with OWN) last hands the descriptor to hardware */
	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);

	priv->tx.proc_idx = (proc_idx + 1) % ndesc;

	return NETDEV_TX_OK;
}

static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}

/* one-byte packet-filter patterns (leading octet of IPv4/IPv6 multicast
 * MAC addresses) used for "all multicast" mode below
 */
static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };

/* ndo_set_rx_mode: program the address filter and the per-slot
 * multicast packet-filter entries from the device's mc list.
 */
static void ave_set_rx_mode(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_adr;
	int count, mc_cnt;
	u32 val;

	/* MAC addr filter enable for promiscuous mode */
	mc_cnt = netdev_mc_count(ndev);
	val = readl(priv->base + AVE_RXCR);
	if (ndev->flags & IFF_PROMISC || !mc_cnt)
		val &= ~AVE_RXCR_AFEN;
	else
		val |= AVE_RXCR_AFEN;
	writel(val, priv->base + AVE_RXCR);

	/* set all multicast address */
	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
				      v4multi_macadr, 1);
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
				      v6multi_macadr, 1);
	} else {
		/* stop all multicast filter */
		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);

		/* set multicast addresses */
		count = 0;
		netdev_for_each_mc_addr(hw_adr, ndev) {
			if (count == mc_cnt)
				break;
			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
					      hw_adr->addr, 6);
			count++;
		}
	}
}

/* ndo_get_stats64: packet/byte counters are read under the u64_stats
 * seqcount retry loop to get a consistent 64-bit snapshot.
 */
static void ave_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *stats)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
		stats->rx_packets = priv->stats_rx.packets;
		stats->rx_bytes = priv->stats_rx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
		stats->tx_packets = priv->stats_tx.packets;
		stats->tx_bytes = priv->stats_tx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));

	stats->rx_errors = priv->stats_rx.errors;
	stats->tx_errors = priv->stats_tx.errors;
	stats->rx_dropped = priv->stats_rx.dropped;
	stats->tx_dropped = priv->stats_tx.dropped;
	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
stats->collisions = priv->stats_tx.collisions; 1518 } 1519 1520 static int ave_set_mac_address(struct net_device *ndev, void *p) 1521 { 1522 int ret = eth_mac_addr(ndev, p); 1523 1524 if (ret) 1525 return ret; 1526 1527 ave_macaddr_init(ndev); 1528 1529 return 0; 1530 } 1531 1532 static const struct net_device_ops ave_netdev_ops = { 1533 .ndo_init = ave_init, 1534 .ndo_uninit = ave_uninit, 1535 .ndo_open = ave_open, 1536 .ndo_stop = ave_stop, 1537 .ndo_start_xmit = ave_start_xmit, 1538 .ndo_do_ioctl = ave_ioctl, 1539 .ndo_set_rx_mode = ave_set_rx_mode, 1540 .ndo_get_stats64 = ave_get_stats64, 1541 .ndo_set_mac_address = ave_set_mac_address, 1542 }; 1543 1544 static int ave_probe(struct platform_device *pdev) 1545 { 1546 const struct ave_soc_data *data; 1547 struct device *dev = &pdev->dev; 1548 char buf[ETHTOOL_FWVERS_LEN]; 1549 struct of_phandle_args args; 1550 phy_interface_t phy_mode; 1551 struct ave_private *priv; 1552 struct net_device *ndev; 1553 struct device_node *np; 1554 struct resource *res; 1555 const void *mac_addr; 1556 void __iomem *base; 1557 const char *name; 1558 int i, irq, ret; 1559 u64 dma_mask; 1560 u32 ave_id; 1561 1562 data = of_device_get_match_data(dev); 1563 if (WARN_ON(!data)) 1564 return -EINVAL; 1565 1566 np = dev->of_node; 1567 phy_mode = of_get_phy_mode(np); 1568 if (phy_mode < 0) { 1569 dev_err(dev, "phy-mode not found\n"); 1570 return -EINVAL; 1571 } 1572 1573 irq = platform_get_irq(pdev, 0); 1574 if (irq < 0) { 1575 dev_err(dev, "IRQ not found\n"); 1576 return irq; 1577 } 1578 1579 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1580 base = devm_ioremap_resource(dev, res); 1581 if (IS_ERR(base)) 1582 return PTR_ERR(base); 1583 1584 ndev = alloc_etherdev(sizeof(struct ave_private)); 1585 if (!ndev) { 1586 dev_err(dev, "can't allocate ethernet device\n"); 1587 return -ENOMEM; 1588 } 1589 1590 ndev->netdev_ops = &ave_netdev_ops; 1591 ndev->ethtool_ops = &ave_ethtool_ops; 1592 SET_NETDEV_DEV(ndev, dev); 1593 1594 ndev->features 
|= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); 1595 ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); 1596 1597 ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); 1598 1599 mac_addr = of_get_mac_address(np); 1600 if (mac_addr) 1601 ether_addr_copy(ndev->dev_addr, mac_addr); 1602 1603 /* if the mac address is invalid, use random mac address */ 1604 if (!is_valid_ether_addr(ndev->dev_addr)) { 1605 eth_hw_addr_random(ndev); 1606 dev_warn(dev, "Using random MAC address: %pM\n", 1607 ndev->dev_addr); 1608 } 1609 1610 priv = netdev_priv(ndev); 1611 priv->base = base; 1612 priv->irq = irq; 1613 priv->ndev = ndev; 1614 priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE); 1615 priv->phy_mode = phy_mode; 1616 priv->data = data; 1617 1618 if (IS_DESC_64BIT(priv)) { 1619 priv->desc_size = AVE_DESC_SIZE_64; 1620 priv->tx.daddr = AVE_TXDM_64; 1621 priv->rx.daddr = AVE_RXDM_64; 1622 dma_mask = DMA_BIT_MASK(64); 1623 } else { 1624 priv->desc_size = AVE_DESC_SIZE_32; 1625 priv->tx.daddr = AVE_TXDM_32; 1626 priv->rx.daddr = AVE_RXDM_32; 1627 dma_mask = DMA_BIT_MASK(32); 1628 } 1629 ret = dma_set_mask(dev, dma_mask); 1630 if (ret) 1631 goto out_free_netdev; 1632 1633 priv->tx.ndesc = AVE_NR_TXDESC; 1634 priv->rx.ndesc = AVE_NR_RXDESC; 1635 1636 u64_stats_init(&priv->stats_tx.syncp); 1637 u64_stats_init(&priv->stats_rx.syncp); 1638 1639 for (i = 0; i < AVE_MAX_CLKS; i++) { 1640 name = priv->data->clock_names[i]; 1641 if (!name) 1642 break; 1643 priv->clk[i] = devm_clk_get(dev, name); 1644 if (IS_ERR(priv->clk[i])) { 1645 ret = PTR_ERR(priv->clk[i]); 1646 goto out_free_netdev; 1647 } 1648 priv->nclks++; 1649 } 1650 1651 for (i = 0; i < AVE_MAX_RSTS; i++) { 1652 name = priv->data->reset_names[i]; 1653 if (!name) 1654 break; 1655 priv->rst[i] = devm_reset_control_get_shared(dev, name); 1656 if (IS_ERR(priv->rst[i])) { 1657 ret = PTR_ERR(priv->rst[i]); 1658 goto out_free_netdev; 1659 } 1660 priv->nrsts++; 1661 } 1662 1663 ret = of_parse_phandle_with_fixed_args(np, 1664 
"socionext,syscon-phy-mode", 1665 1, 0, &args); 1666 if (ret) { 1667 netdev_err(ndev, "can't get syscon-phy-mode property\n"); 1668 goto out_free_netdev; 1669 } 1670 priv->regmap = syscon_node_to_regmap(args.np); 1671 of_node_put(args.np); 1672 if (IS_ERR(priv->regmap)) { 1673 netdev_err(ndev, "can't map syscon-phy-mode\n"); 1674 ret = PTR_ERR(priv->regmap); 1675 goto out_free_netdev; 1676 } 1677 ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]); 1678 if (ret) { 1679 netdev_err(ndev, "invalid phy-mode setting\n"); 1680 goto out_free_netdev; 1681 } 1682 1683 priv->mdio = devm_mdiobus_alloc(dev); 1684 if (!priv->mdio) { 1685 ret = -ENOMEM; 1686 goto out_free_netdev; 1687 } 1688 priv->mdio->priv = ndev; 1689 priv->mdio->parent = dev; 1690 priv->mdio->read = ave_mdiobus_read; 1691 priv->mdio->write = ave_mdiobus_write; 1692 priv->mdio->name = "uniphier-mdio"; 1693 snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x", 1694 pdev->name, pdev->id); 1695 1696 /* Register as a NAPI supported driver */ 1697 netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc); 1698 netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx, 1699 priv->tx.ndesc); 1700 1701 platform_set_drvdata(pdev, ndev); 1702 1703 ret = register_netdev(ndev); 1704 if (ret) { 1705 dev_err(dev, "failed to register netdevice\n"); 1706 goto out_del_napi; 1707 } 1708 1709 /* get ID and version */ 1710 ave_id = readl(priv->base + AVE_IDR); 1711 ave_hw_read_version(ndev, buf, sizeof(buf)); 1712 1713 dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n", 1714 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff, 1715 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff, 1716 buf, priv->irq, phy_modes(phy_mode)); 1717 1718 return 0; 1719 1720 out_del_napi: 1721 netif_napi_del(&priv->napi_rx); 1722 netif_napi_del(&priv->napi_tx); 1723 out_free_netdev: 1724 free_netdev(ndev); 1725 1726 return ret; 1727 } 1728 1729 static int ave_remove(struct platform_device *pdev) 1730 { 1731 struct net_device 
*ndev = platform_get_drvdata(pdev);
	struct ave_private *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ave_suspend(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = ave_stop(ndev);
		netif_device_detach(ndev);
	}

	/* remember the current WoL setting so ave_resume() can restore it */
	ave_ethtool_get_wol(ndev, &wol);
	priv->wolopts = wol.wolopts;

	return ret;
}

static int ave_resume(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	ave_global_reset(ndev);

	/* restore the WoL setting saved in ave_suspend() */
	ave_ethtool_get_wol(ndev, &wol);
	wol.wolopts = priv->wolopts;
	ave_ethtool_set_wol(ndev, &wol);

	if (ndev->phydev) {
		ret = phy_resume(ndev->phydev);
		if (ret)
			return ret;
	}

	if (netif_running(ndev)) {
		ret = ave_open(ndev);
		netif_device_attach(ndev);
	}

	return ret;
}

static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume);
#define AVE_PM_OPS	(&ave_pm_ops)
#else
#define AVE_PM_OPS	NULL
#endif

/* Pro4: only syscon-phy-mode arg 0 is accepted; RMII sets the pin-mode
 * bit, MII/RGMII clear it. Also reused by the PXs2 SoC data below.
 */
static int ave_pro4_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(0);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_RMII(0);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RGMII:
		priv->pinmode_val = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* LD11: internal PHY clears both bits; RMII drives the external-PHY and
 * RMII pin-mode bits. Only syscon-phy-mode arg 0 is accepted.
 */
static int ave_ld11_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_INTERNAL:
		priv->pinmode_val = 0;
		break;
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* LD20: RMII or RGMII only; arg must be 0 */
static int ave_ld20_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(0);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_RMII(0);
		break;
	case PHY_INTERFACE_MODE_RGMII:
		priv->pinmode_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* PXs3: arg (0 or 1) selects which of the two RMII pin-mode bits to
 * control; RMII sets the selected bit, RGMII clears it.
 */
static int ave_pxs3_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 1)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(arg);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_RMII(arg);
		break;
	case PHY_INTERFACE_MODE_RGMII:
		priv->pinmode_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* per-SoC configuration: 32/64-bit descriptor format, clock/reset names
 * (NULL-terminated), and the pin-mode resolver
 */
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"gio", "ether", "ether-gb", "ether-phy",
	},
	.reset_names = {
		"gio", "ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld11_get_pinmode,
};

/* LD20 is the only SoC here using the 64-bit descriptor format */
static const struct ave_soc_data ave_ld20_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld20_get_pinmode,
};

static const struct ave_soc_data ave_pxs3_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};

static const struct of_device_id of_ave_match[] = {
	{
		.compatible = "socionext,uniphier-pro4-ave4",
		.data = &ave_pro4_data,
	},
	{
		.compatible = "socionext,uniphier-pxs2-ave4",
		.data = &ave_pxs2_data,
	},
	{
		.compatible = "socionext,uniphier-ld11-ave4",
		.data = &ave_ld11_data,
	},
	{
		.compatible = "socionext,uniphier-ld20-ave4",
		.data = &ave_ld20_data,
	},
	{
		.compatible = "socionext,uniphier-pxs3-ave4",
		.data = &ave_pxs3_data,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);

static struct platform_driver ave_driver = {
	.probe  = ave_probe,
	.remove = ave_remove,
	.driver	= {
		.name = "ave",
		.pm   = AVE_PM_OPS,
		.of_match_table	= of_ave_match,
	},
};
module_platform_driver(ave_driver);

MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");