// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dave DNET Ethernet Controller driver
 *
 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG

/* function for reading internal MAC register */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before reading the data */
	ndelay(500);

	/* read data read from the MAC register */
	data_read = dnet_readl(bp, MACREG_DATA);

	/* all done */
	return data_read;
}

/* function for writing internal MAC register */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load data to write */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue a write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before exiting */
	ndelay(500);
}

static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}

static void dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * from MAC docs:
	 * "Note that the MAC address is stored in the registers in Hexadecimal
	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
	 * Mac_addr[15:0]), and 0x80 (octet 5) to address 0x0E (Low byte of
	 * Mac_addr[15:0]).
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}
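
/*
 * MDIO goes through the MAC's GMII management interface.  The bit layout of
 * the control word used below is inferred from the two helpers themselves
 * (not from a datasheet): bit 13 selects a write cycle, bits 12..8 carry the
 * PHY address and bits 4..0 the register offset.
 * DNET_INTERNAL_GMII_MNG_CMD_FIN is polled to make sure the interface is
 * idle before a command is issued and to wait for its completion.
 */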

static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare reg_value for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}

static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation */
	tmp = (1 << 13);

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits on data */
	value &= 0xffff;

	/* prepare reg_value for a write */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write data to write first */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}
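
/*
 * PHY state callback: mirror the negotiated link parameters into the MAC by
 * toggling the half-duplex bit in the RX/TX control register, the gigabit
 * bit in the mode register and, on link transitions, the RX/TX enable bits.
 */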

static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}

static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	/* find the first phy */
	phydev = phy_find_first(bp->mii_bus);

	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* TODO : add pin_irq */

	/* attach the mac to the phy */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phy_set_max_speed(phydev, SPEED_1000);
	else
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int dnet_mii_init(struct dnet *bp)
{
	int err;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;

	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);

	bp->mii_bus->priv = bp;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}

/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4148);
}

static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}
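
/*
 * NAPI poll.  The DNET has no DMA engine in this configuration, so frames
 * are read out of the RX FIFOs by the CPU: the upper half of RX_FIFO_WCNT is
 * non-zero while frames are waiting, each frame is described by a command
 * word popped from RX_LEN_FIFO (length in the low 16 bits, error flags in
 * the upper half) and its payload is copied one 32-bit word at a time from
 * RX_DATA_FIFO.
 */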

static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of while loop if there are no more
		 * packets waiting
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
			break;

		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

		skb = netdev_alloc_skb(dev, pkt_len + 5);
		if (skb != NULL) {
			/* Align IP on 16 byte boundaries */
			skb_reserve(skb, 2);
			/*
			 * 'skb_put()' points to the start of sk_buff
			 * data area.
			 */
			data_ptr = skb_put(skb, pkt_len);
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
	}

	if (npackets < budget) {
		/* We processed all packets available.  Tell NAPI it can
		 * stop polling then re-enable rx interrupts.
		 */
		napi_complete_done(napi, npackets);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
	}

	return npackets;
}

static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}

#ifdef DEBUG
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb)	do {} while (0)
#endif
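
/*
 * Transmit path (PIO as well): the frame is pushed into the TX data FIFO as
 * 32-bit words starting from skb->data rounded down to a word boundary, so
 * the command word written to TX_LEN_FIFO carries both the byte offset of
 * the real frame start (bits 17:16) and the frame length (bits 15:0).  When
 * the data FIFO grows past the almost-full threshold the queue is stopped
 * and DNET_INTR_ENB_TX_FIFOAE is unmasked so the interrupt handler can wake
 * it up again.
 */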

static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
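
/*
 * dnet_reset_hw() parks the MAC (flow control only, RX/TX disabled),
 * programs the FIFO thresholds and flushes both FIFOs; dnet_init_hw()
 * additionally sets the station address, the RX/TX control options and
 * unmasks the interrupt sources used by the driver.
 */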

static void dnet_reset_hw(struct dnet *bp)
{
	/* put ts_mac in IDLE state i.e. disable rx/tx */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only cmd FIFO almost full is
	 * implemented for RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
	/*
	 * TX FIFO almost empty threshold: only data FIFO almost empty
	 * is implemented for TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
		    SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}

static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
		    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
		    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
		    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
		    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}

static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(dev->phydev);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}

static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}
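
/*
 * Fold the accumulated hardware counters into the generic net_device_stats;
 * note that IPG violations are deliberately left out of rx_errors (see the
 * commented-out term below).
 */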
pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd); 678 pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte); 679 pr_debug("----------------------------- TX statistics " 680 "-------------------------------\n"); 681 pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast); 682 pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm); 683 pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast); 684 pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast); 685 pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag); 686 pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs); 687 pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo); 688 pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte); 689 } 690 691 static struct net_device_stats *dnet_get_stats(struct net_device *dev) 692 { 693 694 struct dnet *bp = netdev_priv(dev); 695 struct net_device_stats *nstat = &dev->stats; 696 struct dnet_stats *hwstat = &bp->hw_stats; 697 698 /* read stats from hardware */ 699 dnet_update_stats(bp); 700 701 /* Convert HW stats into netdevice stats */ 702 nstat->rx_errors = (hwstat->rx_len_chk_err + 703 hwstat->rx_lng_frm + hwstat->rx_shrt_frm + 704 /* ignore IGP violation error 705 hwstat->rx_ipg_viol + */ 706 hwstat->rx_crc_err + 707 hwstat->rx_pre_shrink + 708 hwstat->rx_drib_nib + hwstat->rx_unsup_opcd); 709 nstat->tx_errors = hwstat->tx_bad_fcs; 710 nstat->rx_length_errors = (hwstat->rx_len_chk_err + 711 hwstat->rx_lng_frm + 712 hwstat->rx_shrt_frm + hwstat->rx_pre_shrink); 713 nstat->rx_crc_errors = hwstat->rx_crc_err; 714 nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib; 715 nstat->rx_packets = hwstat->rx_ok_pkt; 716 nstat->tx_packets = (hwstat->tx_unicast + 717 hwstat->tx_multicast + hwstat->tx_brdcast); 718 nstat->rx_bytes = hwstat->rx_byte; 719 nstat->tx_bytes = hwstat->tx_byte; 720 nstat->multicast = hwstat->rx_multicast; 721 nstat->rx_missed_errors = hwstat->rx_pkt_ignr; 722 723 dnet_print_pretty_hwstats(hwstat); 724 725 return nstat; 726 } 727 728 static void dnet_get_drvinfo(struct net_device *dev, 729 struct ethtool_drvinfo *info) 730 { 731 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 732 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 733 strlcpy(info->bus_info, "0", sizeof(info->bus_info)); 734 } 735 736 static const struct ethtool_ops dnet_ethtool_ops = { 737 .get_drvinfo = dnet_get_drvinfo, 738 .get_link = ethtool_op_get_link, 739 .get_ts_info = ethtool_op_get_ts_info, 740 .get_link_ksettings = phy_ethtool_get_link_ksettings, 741 .set_link_ksettings = phy_ethtool_set_link_ksettings, 742 }; 743 744 static const struct net_device_ops dnet_netdev_ops = { 745 .ndo_open = dnet_open, 746 .ndo_stop = dnet_close, 747 .ndo_get_stats = dnet_get_stats, 748 .ndo_start_xmit = dnet_start_xmit, 749 .ndo_do_ioctl = phy_do_ioctl_running, 750 .ndo_set_mac_address = eth_mac_addr, 751 .ndo_validate_addr = eth_validate_addr, 752 }; 753 754 static int dnet_probe(struct platform_device *pdev) 755 { 756 struct resource *res; 757 struct net_device *dev; 758 struct dnet *bp; 759 struct phy_device *phydev; 760 int err; 761 unsigned int irq; 762 763 irq = platform_get_irq(pdev, 0); 764 765 dev = alloc_etherdev(sizeof(*bp)); 766 if (!dev) 767 return -ENOMEM; 768 769 /* TODO: Actually, we have some interesting features... 

static int dnet_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *dev;
	struct dnet *bp;
	struct phy_device *phydev;
	int err;
	unsigned int irq;

	irq = platform_get_irq(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->dev = dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&bp->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bp->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bp->regs)) {
		err = PTR_ERR(bp->regs);
		goto err_out_free_dev;
	}

	dev->irq = irq;
	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			irq, err);
		goto err_out_free_dev;
	}

	dev->netdev_ops = &dnet_netdev_ops;
	netif_napi_add(dev, &bp->napi, dnet_poll, 64);
	dev->ethtool_ops = &dnet_ethtool_ops;

	dev->base_addr = (unsigned long)bp->regs;

	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;

	dnet_get_hwaddr(bp);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* choose a random ethernet address */
		eth_hw_addr_random(dev);
		__dnet_set_hwaddr(bp);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* register the PHY board fixup (for Marvell 88E1111) */
	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
					 dnet_phy_marvell_fixup);
	/* we can live without it, so just issue a warning */
	if (err)
		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");

	err = dnet_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
		 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
		 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
	phydev = dev->phydev;
	phy_attached_info(phydev);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free_dev:
	free_netdev(dev);
	return err;
}

static int dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver dnet_driver = {
	.probe		= dnet_probe,
	.remove		= dnet_remove,
	.driver		= {
		.name		= "dnet",
	},
};

module_platform_driver(dnet_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");