1 /* 2 * Dave DNET Ethernet Controller driver 3 * 4 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu> 5 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/io.h> 12 #include <linux/module.h> 13 #include <linux/moduleparam.h> 14 #include <linux/kernel.h> 15 #include <linux/types.h> 16 #include <linux/slab.h> 17 #include <linux/delay.h> 18 #include <linux/interrupt.h> 19 #include <linux/netdevice.h> 20 #include <linux/etherdevice.h> 21 #include <linux/dma-mapping.h> 22 #include <linux/platform_device.h> 23 #include <linux/phy.h> 24 25 #include "dnet.h" 26 27 #undef DEBUG 28 29 /* function for reading internal MAC register */ 30 static u16 dnet_readw_mac(struct dnet *bp, u16 reg) 31 { 32 u16 data_read; 33 34 /* issue a read */ 35 dnet_writel(bp, reg, MACREG_ADDR); 36 37 /* since a read/write op to the MAC is very slow, 38 * we must wait before reading the data */ 39 ndelay(500); 40 41 /* read data read from the MAC register */ 42 data_read = dnet_readl(bp, MACREG_DATA); 43 44 /* all done */ 45 return data_read; 46 } 47 48 /* function for writing internal MAC register */ 49 static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) 50 { 51 /* load data to write */ 52 dnet_writel(bp, val, MACREG_DATA); 53 54 /* issue a write */ 55 dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); 56 57 /* since a read/write op to the MAC is very slow, 58 * we must wait before exiting */ 59 ndelay(500); 60 } 61 62 static void __dnet_set_hwaddr(struct dnet *bp) 63 { 64 u16 tmp; 65 66 tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); 67 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); 68 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); 69 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); 70 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); 71 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); 72 } 73 74 static void dnet_get_hwaddr(struct dnet *bp) 75 { 76 u16 tmp; 77 u8 addr[6]; 78 79 /* 80 * from MAC docs: 81 * "Note that the MAC address is stored in the registers in Hexadecimal 82 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80 83 * would require writing 0xAC (octet 0) to address 0x0B (high byte of 84 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of 85 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of 86 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of 87 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of 88 * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of 89 * Mac_addr[15:0]). 90 */ 91 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); 92 *((__be16 *)addr) = cpu_to_be16(tmp); 93 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); 94 *((__be16 *)(addr + 2)) = cpu_to_be16(tmp); 95 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); 96 *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); 97 98 if (is_valid_ether_addr(addr)) 99 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 100 } 101 102 static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 103 { 104 struct dnet *bp = bus->priv; 105 u16 value; 106 107 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 108 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 109 cpu_relax(); 110 111 /* only 5 bits allowed for phy-addr and reg_offset */ 112 mii_id &= 0x1f; 113 regnum &= 0x1f; 114 115 /* prepare reg_value for a read */ 116 value = (mii_id << 8); 117 value |= regnum; 118 119 /* write control word */ 120 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); 121 122 /* wait for end of transfer */ 123 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 124 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 125 cpu_relax(); 126 127 value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); 128 129 pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value); 130 131 return value; 132 } 133 134 static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 135 u16 value) 136 { 137 struct dnet *bp = bus->priv; 138 u16 tmp; 139 140 pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value); 141 142 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 143 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 144 cpu_relax(); 145 146 /* prepare for a write operation */ 147 tmp = (1 << 13); 148 149 /* only 5 bits allowed for phy-addr and reg_offset */ 150 mii_id &= 0x1f; 151 regnum &= 0x1f; 152 153 /* only 16 bits on data */ 154 value &= 0xffff; 155 156 /* prepare reg_value for a write */ 157 tmp |= (mii_id << 8); 158 tmp |= regnum; 159 160 /* write data to write first */ 161 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); 162 163 /* write control word */ 164 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); 165 166 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) 167 & DNET_INTERNAL_GMII_MNG_CMD_FIN)) 168 cpu_relax(); 169 170 return 0; 171 } 172 173 static void dnet_handle_link_change(struct net_device *dev) 174 { 175 struct dnet *bp = netdev_priv(dev); 176 struct phy_device *phydev = bp->phy_dev; 177 unsigned long flags; 178 u32 mode_reg, ctl_reg; 179 180 int status_change = 0; 181 182 spin_lock_irqsave(&bp->lock, flags); 183 184 mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); 185 ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); 186 187 if (phydev->link) { 188 if (bp->duplex != phydev->duplex) { 189 if (phydev->duplex) 190 ctl_reg &= 191 ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP); 192 else 193 ctl_reg |= 194 DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP; 195 196 bp->duplex = phydev->duplex; 197 status_change = 1; 198 } 199 200 if (bp->speed != phydev->speed) { 201 status_change = 1; 202 switch (phydev->speed) { 203 case 1000: 204 mode_reg |= DNET_INTERNAL_MODE_GBITEN; 205 break; 206 case 100: 207 case 10: 208 mode_reg &= ~DNET_INTERNAL_MODE_GBITEN; 209 break; 210 default: 211 printk(KERN_WARNING 212 "%s: Ack! Speed (%d) is not " 213 "10/100/1000!\n", dev->name, 214 phydev->speed); 215 break; 216 } 217 bp->speed = phydev->speed; 218 } 219 } 220 221 if (phydev->link != bp->link) { 222 if (phydev->link) { 223 mode_reg |= 224 (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); 225 } else { 226 mode_reg &= 227 ~(DNET_INTERNAL_MODE_RXEN | 228 DNET_INTERNAL_MODE_TXEN); 229 bp->speed = 0; 230 bp->duplex = -1; 231 } 232 bp->link = phydev->link; 233 234 status_change = 1; 235 } 236 237 if (status_change) { 238 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); 239 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); 240 } 241 242 spin_unlock_irqrestore(&bp->lock, flags); 243 244 if (status_change) { 245 if (phydev->link) 246 printk(KERN_INFO "%s: link up (%d/%s)\n", 247 dev->name, phydev->speed, 248 DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); 249 else 250 printk(KERN_INFO "%s: link down\n", dev->name); 251 } 252 } 253 254 static int dnet_mii_probe(struct net_device *dev) 255 { 256 struct dnet *bp = netdev_priv(dev); 257 struct phy_device *phydev = NULL; 258 259 /* find the first phy */ 260 phydev = phy_find_first(bp->mii_bus); 261 262 if (!phydev) { 263 printk(KERN_ERR "%s: no PHY found\n", dev->name); 264 return -ENODEV; 265 } 266 267 /* TODO : add pin_irq */ 268 269 /* attach the mac to the phy */ 270 if (bp->capabilities & DNET_HAS_RMII) { 271 phydev = phy_connect(dev, phydev_name(phydev), 272 &dnet_handle_link_change, 273 PHY_INTERFACE_MODE_RMII); 274 } else { 275 phydev = phy_connect(dev, phydev_name(phydev), 276 &dnet_handle_link_change, 277 PHY_INTERFACE_MODE_MII); 278 } 279 280 if (IS_ERR(phydev)) { 281 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 282 return PTR_ERR(phydev); 283 } 284 285 /* mask with MAC supported features */ 286 if (bp->capabilities & DNET_HAS_GIGABIT) 287 phydev->supported &= PHY_GBIT_FEATURES; 288 else 289 phydev->supported &= PHY_BASIC_FEATURES; 290 291 phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; 292 293 phydev->advertising = phydev->supported; 294 295 bp->link = 0; 296 bp->speed = 0; 297 bp->duplex = -1; 298 bp->phy_dev = phydev; 299 300 return 0; 301 } 302 303 static int dnet_mii_init(struct dnet *bp) 304 { 305 int err; 306 307 bp->mii_bus = mdiobus_alloc(); 308 if (bp->mii_bus == NULL) 309 return -ENOMEM; 310 311 bp->mii_bus->name = "dnet_mii_bus"; 312 bp->mii_bus->read = &dnet_mdio_read; 313 bp->mii_bus->write = &dnet_mdio_write; 314 315 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 316 bp->pdev->name, bp->pdev->id); 317 318 bp->mii_bus->priv = bp; 319 320 if (mdiobus_register(bp->mii_bus)) { 321 err = -ENXIO; 322 goto err_out; 323 } 324 325 if (dnet_mii_probe(bp->dev) != 0) { 326 err = -ENXIO; 327 goto err_out_unregister_bus; 328 } 329 330 return 0; 331 332 err_out_unregister_bus: 333 mdiobus_unregister(bp->mii_bus); 334 err_out: 335 mdiobus_free(bp->mii_bus); 336 return err; 337 } 338 339 /* For Neptune board: LINK1000 as Link LED and TX as activity LED */ 340 static int dnet_phy_marvell_fixup(struct phy_device *phydev) 341 { 342 return phy_write(phydev, 0x18, 0x4148); 343 } 344 345 static void dnet_update_stats(struct dnet *bp) 346 { 347 u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; 348 u32 *p = &bp->hw_stats.rx_pkt_ignr; 349 u32 *end = &bp->hw_stats.rx_byte + 1; 350 351 WARN_ON((unsigned long)(end - p - 1) != 352 (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4); 353 354 for (; p < end; p++, reg++) 355 *p += readl(reg); 356 357 reg = bp->regs + DNET_TX_UNICAST_CNT; 358 p = &bp->hw_stats.tx_unicast; 359 end = &bp->hw_stats.tx_byte + 1; 360 361 WARN_ON((unsigned long)(end - p - 1) != 362 (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4); 363 364 for (; p < end; p++, reg++) 365 *p += readl(reg); 366 } 367 368 static int dnet_poll(struct napi_struct *napi, int budget) 369 { 370 struct dnet *bp = container_of(napi, struct dnet, napi); 371 struct net_device *dev = bp->dev; 372 int npackets = 0; 373 unsigned int pkt_len; 374 struct sk_buff *skb; 375 unsigned int *data_ptr; 376 u32 int_enable; 377 u32 cmd_word; 378 int i; 379 380 while (npackets < budget) { 381 /* 382 * break out of while loop if there are no more 383 * packets waiting 384 */ 385 if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) 386 break; 387 388 cmd_word = dnet_readl(bp, RX_LEN_FIFO); 389 pkt_len = cmd_word & 0xFFFF; 390 391 if (cmd_word & 0xDF180000) 392 printk(KERN_ERR "%s packet receive error %x\n", 393 __func__, cmd_word); 394 395 skb = netdev_alloc_skb(dev, pkt_len + 5); 396 if (skb != NULL) { 397 /* Align IP on 16 byte boundaries */ 398 skb_reserve(skb, 2); 399 /* 400 * 'skb_put()' points to the start of sk_buff 401 * data area. 402 */ 403 data_ptr = (unsigned int *)skb_put(skb, pkt_len); 404 for (i = 0; i < (pkt_len + 3) >> 2; i++) 405 *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); 406 skb->protocol = eth_type_trans(skb, dev); 407 netif_receive_skb(skb); 408 npackets++; 409 } else 410 printk(KERN_NOTICE 411 "%s: No memory to allocate a sk_buff of " 412 "size %u.\n", dev->name, pkt_len); 413 } 414 415 if (npackets < budget) { 416 /* We processed all packets available. Tell NAPI it can 417 * stop polling then re-enable rx interrupts. 418 */ 419 napi_complete(napi); 420 int_enable = dnet_readl(bp, INTR_ENB); 421 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; 422 dnet_writel(bp, int_enable, INTR_ENB); 423 } 424 425 return npackets; 426 } 427 428 static irqreturn_t dnet_interrupt(int irq, void *dev_id) 429 { 430 struct net_device *dev = dev_id; 431 struct dnet *bp = netdev_priv(dev); 432 u32 int_src, int_enable, int_current; 433 unsigned long flags; 434 unsigned int handled = 0; 435 436 spin_lock_irqsave(&bp->lock, flags); 437 438 /* read and clear the DNET irq (clear on read) */ 439 int_src = dnet_readl(bp, INTR_SRC); 440 int_enable = dnet_readl(bp, INTR_ENB); 441 int_current = int_src & int_enable; 442 443 /* restart the queue if we had stopped it for TX fifo almost full */ 444 if (int_current & DNET_INTR_SRC_TX_FIFOAE) { 445 int_enable = dnet_readl(bp, INTR_ENB); 446 int_enable &= ~DNET_INTR_ENB_TX_FIFOAE; 447 dnet_writel(bp, int_enable, INTR_ENB); 448 netif_wake_queue(dev); 449 handled = 1; 450 } 451 452 /* RX FIFO error checking */ 453 if (int_current & 454 (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) { 455 printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__, 456 dnet_readl(bp, RX_STATUS), int_current); 457 /* we can only flush the RX FIFOs */ 458 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); 459 ndelay(500); 460 dnet_writel(bp, 0, SYS_CTL); 461 handled = 1; 462 } 463 464 /* TX FIFO error checking */ 465 if (int_current & 466 (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) { 467 printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__, 468 dnet_readl(bp, TX_STATUS), int_current); 469 /* we can only flush the TX FIFOs */ 470 dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); 471 ndelay(500); 472 dnet_writel(bp, 0, SYS_CTL); 473 handled = 1; 474 } 475 476 if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) { 477 if (napi_schedule_prep(&bp->napi)) { 478 /* 479 * There's no point taking any more interrupts 480 * until we have processed the buffers 481 */ 482 /* Disable Rx interrupts and schedule NAPI poll */ 483 int_enable = dnet_readl(bp, INTR_ENB); 484 int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF; 485 dnet_writel(bp, int_enable, INTR_ENB); 486 __napi_schedule(&bp->napi); 487 } 488 handled = 1; 489 } 490 491 if (!handled) 492 pr_debug("%s: irq %x remains\n", __func__, int_current); 493 494 spin_unlock_irqrestore(&bp->lock, flags); 495 496 return IRQ_RETVAL(handled); 497 } 498 499 #ifdef DEBUG 500 static inline void dnet_print_skb(struct sk_buff *skb) 501 { 502 int k; 503 printk(KERN_DEBUG PFX "data:"); 504 for (k = 0; k < skb->len; k++) 505 printk(" %02x", (unsigned int)skb->data[k]); 506 printk("\n"); 507 } 508 #else 509 #define dnet_print_skb(skb) do {} while (0) 510 #endif 511 512 static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) 513 { 514 515 struct dnet *bp = netdev_priv(dev); 516 u32 tx_status, irq_enable; 517 unsigned int len, i, tx_cmd, wrsz; 518 unsigned long flags; 519 unsigned int *bufp; 520 521 tx_status = dnet_readl(bp, TX_STATUS); 522 523 pr_debug("start_xmit: len %u head %p data %p\n", 524 skb->len, skb->head, skb->data); 525 dnet_print_skb(skb); 526 527 /* frame size (words) */ 528 len = (skb->len + 3) >> 2; 529 530 spin_lock_irqsave(&bp->lock, flags); 531 532 tx_status = dnet_readl(bp, TX_STATUS); 533 534 bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL); 535 wrsz = (u32) skb->len + 3; 536 wrsz += ((unsigned long) skb->data) & 0x3; 537 wrsz >>= 2; 538 tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len; 539 540 /* check if there is enough room for the current frame */ 541 if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { 542 for (i = 0; i < wrsz; i++) 543 dnet_writel(bp, *bufp++, TX_DATA_FIFO); 544 545 /* 546 * inform MAC that a packet's written and ready to be 547 * shipped out 548 */ 549 dnet_writel(bp, tx_cmd, TX_LEN_FIFO); 550 } 551 552 if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { 553 netif_stop_queue(dev); 554 tx_status = dnet_readl(bp, INTR_SRC); 555 irq_enable = dnet_readl(bp, INTR_ENB); 556 irq_enable |= DNET_INTR_ENB_TX_FIFOAE; 557 dnet_writel(bp, irq_enable, INTR_ENB); 558 } 559 560 skb_tx_timestamp(skb); 561 562 /* free the buffer */ 563 dev_kfree_skb(skb); 564 565 spin_unlock_irqrestore(&bp->lock, flags); 566 567 return NETDEV_TX_OK; 568 } 569 570 static void dnet_reset_hw(struct dnet *bp) 571 { 572 /* put ts_mac in IDLE state i.e. disable rx/tx */ 573 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); 574 575 /* 576 * RX FIFO almost full threshold: only cmd FIFO almost full is 577 * implemented for RX side 578 */ 579 dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); 580 /* 581 * TX FIFO almost empty threshold: only data FIFO almost empty 582 * is implemented for TX side 583 */ 584 dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); 585 586 /* flush rx/tx fifos */ 587 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, 588 SYS_CTL); 589 msleep(1); 590 dnet_writel(bp, 0, SYS_CTL); 591 } 592 593 static void dnet_init_hw(struct dnet *bp) 594 { 595 u32 config; 596 597 dnet_reset_hw(bp); 598 __dnet_set_hwaddr(bp); 599 600 config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); 601 602 if (bp->dev->flags & IFF_PROMISC) 603 /* Copy All Frames */ 604 config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC; 605 if (!(bp->dev->flags & IFF_BROADCAST)) 606 /* No BroadCast */ 607 config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST; 608 609 config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE | 610 DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST | 611 DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL | 612 DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS; 613 614 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); 615 616 /* clear irq before enabling them */ 617 config = dnet_readl(bp, INTR_SRC); 618 619 /* enable RX/TX interrupt, recv packet ready interrupt */ 620 dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | 621 DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR | 622 DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL | 623 DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM | 624 DNET_INTR_ENB_RX_PKTRDY, INTR_ENB); 625 } 626 627 static int dnet_open(struct net_device *dev) 628 { 629 struct dnet *bp = netdev_priv(dev); 630 631 /* if the phy is not yet register, retry later */ 632 if (!bp->phy_dev) 633 return -EAGAIN; 634 635 napi_enable(&bp->napi); 636 dnet_init_hw(bp); 637 638 phy_start_aneg(bp->phy_dev); 639 640 /* schedule a link state check */ 641 phy_start(bp->phy_dev); 642 643 netif_start_queue(dev); 644 645 return 0; 646 } 647 648 static int dnet_close(struct net_device *dev) 649 { 650 struct dnet *bp = netdev_priv(dev); 651 652 netif_stop_queue(dev); 653 napi_disable(&bp->napi); 654 655 if (bp->phy_dev) 656 phy_stop(bp->phy_dev); 657 658 dnet_reset_hw(bp); 659 netif_carrier_off(dev); 660 661 return 0; 662 } 663 664 static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat) 665 { 666 pr_debug("%s\n", __func__); 667 pr_debug("----------------------------- RX statistics " 668 "-------------------------------\n"); 669 pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr); 670 pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err); 671 pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm); 672 pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm); 673 pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol); 674 pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err); 675 pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt); 676 pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm); 677 pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm); 678 pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast); 679 pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast); 680 pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag); 681 pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink); 682 pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib); 683 pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd); 684 pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte); 685 pr_debug("----------------------------- TX statistics " 686 "-------------------------------\n"); 687 pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast); 688 pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm); 689 pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast); 690 pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast); 691 pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag); 692 pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs); 693 pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo); 694 pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte); 695 } 696 697 static struct net_device_stats *dnet_get_stats(struct net_device *dev) 698 { 699 700 struct dnet *bp = netdev_priv(dev); 701 struct net_device_stats *nstat = &dev->stats; 702 struct dnet_stats *hwstat = &bp->hw_stats; 703 704 /* read stats from hardware */ 705 dnet_update_stats(bp); 706 707 /* Convert HW stats into netdevice stats */ 708 nstat->rx_errors = (hwstat->rx_len_chk_err + 709 hwstat->rx_lng_frm + hwstat->rx_shrt_frm + 710 /* ignore IGP violation error 711 hwstat->rx_ipg_viol + */ 712 hwstat->rx_crc_err + 713 hwstat->rx_pre_shrink + 714 hwstat->rx_drib_nib + hwstat->rx_unsup_opcd); 715 nstat->tx_errors = hwstat->tx_bad_fcs; 716 nstat->rx_length_errors = (hwstat->rx_len_chk_err + 717 hwstat->rx_lng_frm + 718 hwstat->rx_shrt_frm + hwstat->rx_pre_shrink); 719 nstat->rx_crc_errors = hwstat->rx_crc_err; 720 nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib; 721 nstat->rx_packets = hwstat->rx_ok_pkt; 722 nstat->tx_packets = (hwstat->tx_unicast + 723 hwstat->tx_multicast + hwstat->tx_brdcast); 724 nstat->rx_bytes = hwstat->rx_byte; 725 nstat->tx_bytes = hwstat->tx_byte; 726 nstat->multicast = hwstat->rx_multicast; 727 nstat->rx_missed_errors = hwstat->rx_pkt_ignr; 728 729 dnet_print_pretty_hwstats(hwstat); 730 731 return nstat; 732 } 733 734 static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 735 { 736 struct dnet *bp = netdev_priv(dev); 737 struct phy_device *phydev = bp->phy_dev; 738 739 if (!phydev) 740 return -ENODEV; 741 742 return phy_ethtool_gset(phydev, cmd); 743 } 744 745 static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 746 { 747 struct dnet *bp = netdev_priv(dev); 748 struct phy_device *phydev = bp->phy_dev; 749 750 if (!phydev) 751 return -ENODEV; 752 753 return phy_ethtool_sset(phydev, cmd); 754 } 755 756 static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 757 { 758 struct dnet *bp = netdev_priv(dev); 759 struct phy_device *phydev = bp->phy_dev; 760 761 if (!netif_running(dev)) 762 return -EINVAL; 763 764 if (!phydev) 765 return -ENODEV; 766 767 return phy_mii_ioctl(phydev, rq, cmd); 768 } 769 770 static void dnet_get_drvinfo(struct net_device *dev, 771 struct ethtool_drvinfo *info) 772 { 773 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 774 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 775 strlcpy(info->bus_info, "0", sizeof(info->bus_info)); 776 } 777 778 static const struct ethtool_ops dnet_ethtool_ops = { 779 .get_settings = dnet_get_settings, 780 .set_settings = dnet_set_settings, 781 .get_drvinfo = dnet_get_drvinfo, 782 .get_link = ethtool_op_get_link, 783 .get_ts_info = ethtool_op_get_ts_info, 784 }; 785 786 static const struct net_device_ops dnet_netdev_ops = { 787 .ndo_open = dnet_open, 788 .ndo_stop = dnet_close, 789 .ndo_get_stats = dnet_get_stats, 790 .ndo_start_xmit = dnet_start_xmit, 791 .ndo_do_ioctl = dnet_ioctl, 792 .ndo_set_mac_address = eth_mac_addr, 793 .ndo_validate_addr = eth_validate_addr, 794 .ndo_change_mtu = eth_change_mtu, 795 }; 796 797 static int dnet_probe(struct platform_device *pdev) 798 { 799 struct resource *res; 800 struct net_device *dev; 801 struct dnet *bp; 802 struct phy_device *phydev; 803 int err; 804 unsigned int irq; 805 806 irq = platform_get_irq(pdev, 0); 807 808 dev = alloc_etherdev(sizeof(*bp)); 809 if (!dev) 810 return -ENOMEM; 811 812 /* TODO: Actually, we have some interesting features... */ 813 dev->features |= 0; 814 815 bp = netdev_priv(dev); 816 bp->dev = dev; 817 818 platform_set_drvdata(pdev, dev); 819 SET_NETDEV_DEV(dev, &pdev->dev); 820 821 spin_lock_init(&bp->lock); 822 823 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 824 bp->regs = devm_ioremap_resource(&pdev->dev, res); 825 if (IS_ERR(bp->regs)) { 826 err = PTR_ERR(bp->regs); 827 goto err_out_free_dev; 828 } 829 830 dev->irq = irq; 831 err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev); 832 if (err) { 833 dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", 834 irq, err); 835 goto err_out_free_dev; 836 } 837 838 dev->netdev_ops = &dnet_netdev_ops; 839 netif_napi_add(dev, &bp->napi, dnet_poll, 64); 840 dev->ethtool_ops = &dnet_ethtool_ops; 841 842 dev->base_addr = (unsigned long)bp->regs; 843 844 bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; 845 846 dnet_get_hwaddr(bp); 847 848 if (!is_valid_ether_addr(dev->dev_addr)) { 849 /* choose a random ethernet address */ 850 eth_hw_addr_random(dev); 851 __dnet_set_hwaddr(bp); 852 } 853 854 err = register_netdev(dev); 855 if (err) { 856 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 857 goto err_out_free_irq; 858 } 859 860 /* register the PHY board fixup (for Marvell 88E1111) */ 861 err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0, 862 dnet_phy_marvell_fixup); 863 /* we can live without it, so just issue a warning */ 864 if (err) 865 dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); 866 867 err = dnet_mii_init(bp); 868 if (err) 869 goto err_out_unregister_netdev; 870 871 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", 872 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr); 873 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n", 874 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", 875 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", 876 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", 877 (bp->capabilities & DNET_HAS_DMA) ? "" : "no "); 878 phydev = bp->phy_dev; 879 phy_attached_info(phydev); 880 881 return 0; 882 883 err_out_unregister_netdev: 884 unregister_netdev(dev); 885 err_out_free_irq: 886 free_irq(dev->irq, dev); 887 err_out_free_dev: 888 free_netdev(dev); 889 return err; 890 } 891 892 static int dnet_remove(struct platform_device *pdev) 893 { 894 895 struct net_device *dev; 896 struct dnet *bp; 897 898 dev = platform_get_drvdata(pdev); 899 900 if (dev) { 901 bp = netdev_priv(dev); 902 if (bp->phy_dev) 903 phy_disconnect(bp->phy_dev); 904 mdiobus_unregister(bp->mii_bus); 905 mdiobus_free(bp->mii_bus); 906 unregister_netdev(dev); 907 free_irq(dev->irq, dev); 908 free_netdev(dev); 909 } 910 911 return 0; 912 } 913 914 static struct platform_driver dnet_driver = { 915 .probe = dnet_probe, 916 .remove = dnet_remove, 917 .driver = { 918 .name = "dnet", 919 }, 920 }; 921 922 module_platform_driver(dnet_driver); 923 924 MODULE_LICENSE("GPL"); 925 MODULE_DESCRIPTION("Dave DNET Ethernet driver"); 926 MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, " 927 "Matteo Vit <matteo.vit@dave.eu>"); 928