1 /* 2 * Intel IXP4xx Ethernet driver for Linux 3 * 4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of version 2 of the GNU General Public License 8 * as published by the Free Software Foundation. 9 * 10 * Ethernet port config (0x00 is not present on IXP42X): 11 * 12 * logical port 0x00 0x10 0x20 13 * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C) 14 * physical PortId 2 0 1 15 * TX queue 23 24 25 16 * RX-free queue 26 27 28 17 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable 18 * 19 * 20 * Queue entries: 21 * bits 0 -> 1 - NPE ID (RX and TX-done) 22 * bits 0 -> 2 - priority (TX, per 802.1D) 23 * bits 3 -> 4 - port ID (user-set?) 24 * bits 5 -> 31 - physical descriptor address 25 */ 26 27 #include <linux/delay.h> 28 #include <linux/dma-mapping.h> 29 #include <linux/dmapool.h> 30 #include <linux/etherdevice.h> 31 #include <linux/io.h> 32 #include <linux/kernel.h> 33 #include <linux/net_tstamp.h> 34 #include <linux/phy.h> 35 #include <linux/platform_device.h> 36 #include <linux/ptp_classify.h> 37 #include <linux/slab.h> 38 #include <mach/ixp46x_ts.h> 39 #include <mach/npe.h> 40 #include <mach/qmgr.h> 41 42 #define DEBUG_DESC 0 43 #define DEBUG_RX 0 44 #define DEBUG_TX 0 45 #define DEBUG_PKT_BYTES 0 46 #define DEBUG_MDIO 0 47 #define DEBUG_CLOSE 0 48 49 #define DRV_NAME "ixp4xx_eth" 50 51 #define MAX_NPES 3 52 53 #define RX_DESCS 64 /* also length of all RX queues */ 54 #define TX_DESCS 16 /* also length of all TX queues */ 55 #define TXDONE_QUEUE_LEN 64 /* dwords */ 56 57 #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) 58 #define REGS_SIZE 0x1000 59 #define MAX_MRU 1536 /* 0x600 */ 60 #define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4) 61 62 #define NAPI_WEIGHT 16 63 #define MDIO_INTERVAL (3 * HZ) 64 #define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */ 65 #define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */ 66 67 #define NPE_ID(port_id) ((port_id) >> 4) 68 #define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3) 69 #define TX_QUEUE(port_id) (NPE_ID(port_id) + 23) 70 #define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) 71 #define TXDONE_QUEUE 31 72 73 #define PTP_SLAVE_MODE 1 74 #define PTP_MASTER_MODE 2 75 #define PORT2CHANNEL(p) NPE_ID(p->id) 76 77 /* TX Control Registers */ 78 #define TX_CNTRL0_TX_EN 0x01 79 #define TX_CNTRL0_HALFDUPLEX 0x02 80 #define TX_CNTRL0_RETRY 0x04 81 #define TX_CNTRL0_PAD_EN 0x08 82 #define TX_CNTRL0_APPEND_FCS 0x10 83 #define TX_CNTRL0_2DEFER 0x20 84 #define TX_CNTRL0_RMII 0x40 /* reduced MII */ 85 #define TX_CNTRL1_RETRIES 0x0F /* 4 bits */ 86 87 /* RX Control Registers */ 88 #define RX_CNTRL0_RX_EN 0x01 89 #define RX_CNTRL0_PADSTRIP_EN 0x02 90 #define RX_CNTRL0_SEND_FCS 0x04 91 #define RX_CNTRL0_PAUSE_EN 0x08 92 #define RX_CNTRL0_LOOP_EN 0x10 93 #define RX_CNTRL0_ADDR_FLTR_EN 0x20 94 #define RX_CNTRL0_RX_RUNT_EN 0x40 95 #define RX_CNTRL0_BCAST_DIS 0x80 96 #define RX_CNTRL1_DEFER_EN 0x01 97 98 /* Core Control Register */ 99 #define CORE_RESET 0x01 100 #define CORE_RX_FIFO_FLUSH 0x02 101 #define CORE_TX_FIFO_FLUSH 0x04 102 #define CORE_SEND_JAM 0x08 103 #define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */ 104 105 #define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \ 106 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \ 107 TX_CNTRL0_2DEFER) 108 #define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN 109 #define DEFAULT_CORE_CNTRL CORE_MDC_EN 110 111 112 /* NPE message codes */ 113 #define NPE_GETSTATUS 0x00 114 #define NPE_EDB_SETPORTADDRESS 0x01 115 #define NPE_EDB_GETMACADDRESSDATABASE 0x02 116 #define NPE_EDB_SETMACADDRESSSDATABASE 0x03 117 #define NPE_GETSTATS 0x04 118 #define NPE_RESETSTATS 0x05 119 #define NPE_SETMAXFRAMELENGTHS 0x06 120 #define NPE_VLAN_SETRXTAGMODE 0x07 121 #define NPE_VLAN_SETDEFAULTRXVID 0x08 122 #define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09 123 #define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A 124 #define NPE_VLAN_SETRXQOSENTRY 0x0B 125 #define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C 126 #define NPE_STP_SETBLOCKINGSTATE 0x0D 127 #define NPE_FW_SETFIREWALLMODE 0x0E 128 #define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F 129 #define NPE_PC_SETAPMACTABLE 0x11 130 #define NPE_SETLOOPBACK_MODE 0x12 131 #define NPE_PC_SETBSSIDTABLE 0x13 132 #define NPE_ADDRESS_FILTER_CONFIG 0x14 133 #define NPE_APPENDFCSCONFIG 0x15 134 #define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16 135 #define NPE_MAC_RECOVERY_START 0x17 136 137 138 #ifdef __ARMEB__ 139 typedef struct sk_buff buffer_t; 140 #define free_buffer dev_kfree_skb 141 #define free_buffer_irq dev_kfree_skb_irq 142 #else 143 typedef void buffer_t; 144 #define free_buffer kfree 145 #define free_buffer_irq kfree 146 #endif 147 148 struct eth_regs { 149 u32 tx_control[2], __res1[2]; /* 000 */ 150 u32 rx_control[2], __res2[2]; /* 010 */ 151 u32 random_seed, __res3[3]; /* 020 */ 152 u32 partial_empty_threshold, __res4; /* 030 */ 153 u32 partial_full_threshold, __res5; /* 038 */ 154 u32 tx_start_bytes, __res6[3]; /* 040 */ 155 u32 tx_deferral, rx_deferral, __res7[2];/* 050 */ 156 u32 tx_2part_deferral[2], __res8[2]; /* 060 */ 157 u32 slot_time, __res9[3]; /* 070 */ 158 u32 mdio_command[4]; /* 080 */ 159 u32 mdio_status[4]; /* 090 */ 160 u32 mcast_mask[6], __res10[2]; /* 0A0 */ 161 u32 mcast_addr[6], __res11[2]; /* 0C0 */ 162 u32 int_clock_threshold, __res12[3]; /* 0E0 */ 163 u32 hw_addr[6], __res13[61]; /* 0F0 */ 164 u32 core_control; /* 1FC */ 165 }; 166 167 struct port { 168 struct resource *mem_res; 169 struct eth_regs __iomem *regs; 170 struct npe *npe; 171 struct net_device *netdev; 172 struct napi_struct napi; 173 struct phy_device *phydev; 174 struct eth_plat_info *plat; 175 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; 176 struct desc *desc_tab; /* coherent */ 177 u32 desc_tab_phys; 178 int id; /* logical port ID */ 179 int speed, duplex; 180 u8 firmware[4]; 181 int hwts_tx_en; 182 int hwts_rx_en; 183 }; 184 185 /* NPE message structure */ 186 struct msg { 187 #ifdef __ARMEB__ 188 u8 cmd, eth_id, byte2, byte3; 189 u8 byte4, byte5, byte6, byte7; 190 #else 191 u8 byte3, byte2, eth_id, cmd; 192 u8 byte7, byte6, byte5, byte4; 193 #endif 194 }; 195 196 /* Ethernet packet descriptor */ 197 struct desc { 198 u32 next; /* pointer to next buffer, unused */ 199 200 #ifdef __ARMEB__ 201 u16 buf_len; /* buffer length */ 202 u16 pkt_len; /* packet length */ 203 u32 data; /* pointer to data buffer in RAM */ 204 u8 dest_id; 205 u8 src_id; 206 u16 flags; 207 u8 qos; 208 u8 padlen; 209 u16 vlan_tci; 210 #else 211 u16 pkt_len; /* packet length */ 212 u16 buf_len; /* buffer length */ 213 u32 data; /* pointer to data buffer in RAM */ 214 u16 flags; 215 u8 src_id; 216 u8 dest_id; 217 u16 vlan_tci; 218 u8 padlen; 219 u8 qos; 220 #endif 221 222 #ifdef __ARMEB__ 223 u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3; 224 u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1; 225 u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5; 226 #else 227 u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0; 228 u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4; 229 u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2; 230 #endif 231 }; 232 233 234 #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ 235 (n) * sizeof(struct desc)) 236 #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) 237 238 #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ 239 ((n) + RX_DESCS) * sizeof(struct desc)) 240 #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) 241 242 #ifndef __ARMEB__ 243 static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) 244 { 245 int i; 246 for (i = 0; i < cnt; i++) 247 dest[i] = swab32(src[i]); 248 } 249 #endif 250 251 static spinlock_t mdio_lock; 252 static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */ 253 static struct mii_bus *mdio_bus; 254 static int ports_open; 255 static struct port *npe_port_tab[MAX_NPES]; 256 static struct dma_pool *dma_pool; 257 258 static struct sock_filter ptp_filter[] = { 259 PTP_FILTER 260 }; 261 262 static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) 263 { 264 u8 *data = skb->data; 265 unsigned int offset; 266 u16 *hi, *id; 267 u32 lo; 268 269 if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4) 270 return 0; 271 272 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 273 274 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) 275 return 0; 276 277 hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID); 278 id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); 279 280 memcpy(&lo, &hi[1], sizeof(lo)); 281 282 return (uid_hi == ntohs(*hi) && 283 uid_lo == ntohl(lo) && 284 seqid == ntohs(*id)); 285 } 286 287 static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb) 288 { 289 struct skb_shared_hwtstamps *shhwtstamps; 290 struct ixp46x_ts_regs *regs; 291 u64 ns; 292 u32 ch, hi, lo, val; 293 u16 uid, seq; 294 295 if (!port->hwts_rx_en) 296 return; 297 298 ch = PORT2CHANNEL(port); 299 300 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; 301 302 val = __raw_readl(®s->channel[ch].ch_event); 303 304 if (!(val & RX_SNAPSHOT_LOCKED)) 305 return; 306 307 lo = __raw_readl(®s->channel[ch].src_uuid_lo); 308 hi = __raw_readl(®s->channel[ch].src_uuid_hi); 309 310 uid = hi & 0xffff; 311 seq = (hi >> 16) & 0xffff; 312 313 if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq))) 314 goto out; 315 316 lo = __raw_readl(®s->channel[ch].rx_snap_lo); 317 hi = __raw_readl(®s->channel[ch].rx_snap_hi); 318 ns = ((u64) hi) << 32; 319 ns |= lo; 320 ns <<= TICKS_NS_SHIFT; 321 322 shhwtstamps = skb_hwtstamps(skb); 323 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 324 shhwtstamps->hwtstamp = ns_to_ktime(ns); 325 out: 326 __raw_writel(RX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); 327 } 328 329 static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb) 330 { 331 struct skb_shared_hwtstamps shhwtstamps; 332 struct ixp46x_ts_regs *regs; 333 struct skb_shared_info *shtx; 334 u64 ns; 335 u32 ch, cnt, hi, lo, val; 336 337 shtx = skb_shinfo(skb); 338 if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en)) 339 shtx->tx_flags |= SKBTX_IN_PROGRESS; 340 else 341 return; 342 343 ch = PORT2CHANNEL(port); 344 345 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; 346 347 /* 348 * This really stinks, but we have to poll for the Tx time stamp. 349 * Usually, the time stamp is ready after 4 to 6 microseconds. 350 */ 351 for (cnt = 0; cnt < 100; cnt++) { 352 val = __raw_readl(®s->channel[ch].ch_event); 353 if (val & TX_SNAPSHOT_LOCKED) 354 break; 355 udelay(1); 356 } 357 if (!(val & TX_SNAPSHOT_LOCKED)) { 358 shtx->tx_flags &= ~SKBTX_IN_PROGRESS; 359 return; 360 } 361 362 lo = __raw_readl(®s->channel[ch].tx_snap_lo); 363 hi = __raw_readl(®s->channel[ch].tx_snap_hi); 364 ns = ((u64) hi) << 32; 365 ns |= lo; 366 ns <<= TICKS_NS_SHIFT; 367 368 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 369 shhwtstamps.hwtstamp = ns_to_ktime(ns); 370 skb_tstamp_tx(skb, &shhwtstamps); 371 372 __raw_writel(TX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); 373 } 374 375 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 376 { 377 struct hwtstamp_config cfg; 378 struct ixp46x_ts_regs *regs; 379 struct port *port = netdev_priv(netdev); 380 int ch; 381 382 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 383 return -EFAULT; 384 385 if (cfg.flags) /* reserved for future extensions */ 386 return -EINVAL; 387 388 ch = PORT2CHANNEL(port); 389 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; 390 391 switch (cfg.tx_type) { 392 case HWTSTAMP_TX_OFF: 393 port->hwts_tx_en = 0; 394 break; 395 case HWTSTAMP_TX_ON: 396 port->hwts_tx_en = 1; 397 break; 398 default: 399 return -ERANGE; 400 } 401 402 switch (cfg.rx_filter) { 403 case HWTSTAMP_FILTER_NONE: 404 port->hwts_rx_en = 0; 405 break; 406 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 407 port->hwts_rx_en = PTP_SLAVE_MODE; 408 __raw_writel(0, ®s->channel[ch].ch_control); 409 break; 410 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 411 port->hwts_rx_en = PTP_MASTER_MODE; 412 __raw_writel(MASTER_MODE, ®s->channel[ch].ch_control); 413 break; 414 default: 415 return -ERANGE; 416 } 417 418 /* Clear out any old time stamps. */ 419 __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, 420 ®s->channel[ch].ch_event); 421 422 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 423 } 424 425 static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, 426 int write, u16 cmd) 427 { 428 int cycles = 0; 429 430 if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) { 431 printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name); 432 return -1; 433 } 434 435 if (write) { 436 __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]); 437 __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]); 438 } 439 __raw_writel(((phy_id << 5) | location) & 0xFF, 440 &mdio_regs->mdio_command[2]); 441 __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */, 442 &mdio_regs->mdio_command[3]); 443 444 while ((cycles < MAX_MDIO_RETRIES) && 445 (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) { 446 udelay(1); 447 cycles++; 448 } 449 450 if (cycles == MAX_MDIO_RETRIES) { 451 printk(KERN_ERR "%s #%i: MII write failed\n", bus->name, 452 phy_id); 453 return -1; 454 } 455 456 #if DEBUG_MDIO 457 printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name, 458 phy_id, write ? "write" : "read", cycles); 459 #endif 460 461 if (write) 462 return 0; 463 464 if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) { 465 #if DEBUG_MDIO 466 printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name, 467 phy_id); 468 #endif 469 return 0xFFFF; /* don't return error */ 470 } 471 472 return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) | 473 ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8); 474 } 475 476 static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location) 477 { 478 unsigned long flags; 479 int ret; 480 481 spin_lock_irqsave(&mdio_lock, flags); 482 ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0); 483 spin_unlock_irqrestore(&mdio_lock, flags); 484 #if DEBUG_MDIO 485 printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name, 486 phy_id, location, ret); 487 #endif 488 return ret; 489 } 490 491 static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location, 492 u16 val) 493 { 494 unsigned long flags; 495 int ret; 496 497 spin_lock_irqsave(&mdio_lock, flags); 498 ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val); 499 spin_unlock_irqrestore(&mdio_lock, flags); 500 #if DEBUG_MDIO 501 printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n", 502 bus->name, phy_id, location, val, ret); 503 #endif 504 return ret; 505 } 506 507 static int ixp4xx_mdio_register(void) 508 { 509 int err; 510 511 if (!(mdio_bus = mdiobus_alloc())) 512 return -ENOMEM; 513 514 if (cpu_is_ixp43x()) { 515 /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */ 516 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH)) 517 return -ENODEV; 518 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; 519 } else { 520 /* All MII PHY accesses use NPE-B Ethernet registers */ 521 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) 522 return -ENODEV; 523 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; 524 } 525 526 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control); 527 spin_lock_init(&mdio_lock); 528 mdio_bus->name = "IXP4xx MII Bus"; 529 mdio_bus->read = &ixp4xx_mdio_read; 530 mdio_bus->write = &ixp4xx_mdio_write; 531 strcpy(mdio_bus->id, "0"); 532 533 if ((err = mdiobus_register(mdio_bus))) 534 mdiobus_free(mdio_bus); 535 return err; 536 } 537 538 static void ixp4xx_mdio_remove(void) 539 { 540 mdiobus_unregister(mdio_bus); 541 mdiobus_free(mdio_bus); 542 } 543 544 545 static void ixp4xx_adjust_link(struct net_device *dev) 546 { 547 struct port *port = netdev_priv(dev); 548 struct phy_device *phydev = port->phydev; 549 550 if (!phydev->link) { 551 if (port->speed) { 552 port->speed = 0; 553 printk(KERN_INFO "%s: link down\n", dev->name); 554 } 555 return; 556 } 557 558 if (port->speed == phydev->speed && port->duplex == phydev->duplex) 559 return; 560 561 port->speed = phydev->speed; 562 port->duplex = phydev->duplex; 563 564 if (port->duplex) 565 __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX, 566 &port->regs->tx_control[0]); 567 else 568 __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX, 569 &port->regs->tx_control[0]); 570 571 printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n", 572 dev->name, port->speed, port->duplex ? "full" : "half"); 573 } 574 575 576 static inline void debug_pkt(struct net_device *dev, const char *func, 577 u8 *data, int len) 578 { 579 #if DEBUG_PKT_BYTES 580 int i; 581 582 printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len); 583 for (i = 0; i < len; i++) { 584 if (i >= DEBUG_PKT_BYTES) 585 break; 586 printk("%s%02X", 587 ((i == 6) || (i == 12) || (i >= 14)) ? " " : "", 588 data[i]); 589 } 590 printk("\n"); 591 #endif 592 } 593 594 595 static inline void debug_desc(u32 phys, struct desc *desc) 596 { 597 #if DEBUG_DESC 598 printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X" 599 " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n", 600 phys, desc->next, desc->buf_len, desc->pkt_len, 601 desc->data, desc->dest_id, desc->src_id, desc->flags, 602 desc->qos, desc->padlen, desc->vlan_tci, 603 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, 604 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, 605 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, 606 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); 607 #endif 608 } 609 610 static inline int queue_get_desc(unsigned int queue, struct port *port, 611 int is_tx) 612 { 613 u32 phys, tab_phys, n_desc; 614 struct desc *tab; 615 616 if (!(phys = qmgr_get_entry(queue))) 617 return -1; 618 619 phys &= ~0x1F; /* mask out non-address bits */ 620 tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0); 621 tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0); 622 n_desc = (phys - tab_phys) / sizeof(struct desc); 623 BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS)); 624 debug_desc(phys, &tab[n_desc]); 625 BUG_ON(tab[n_desc].next); 626 return n_desc; 627 } 628 629 static inline void queue_put_desc(unsigned int queue, u32 phys, 630 struct desc *desc) 631 { 632 debug_desc(phys, desc); 633 BUG_ON(phys & 0x1F); 634 qmgr_put_entry(queue, phys); 635 /* Don't check for queue overflow here, we've allocated sufficient 636 length and queues >= 32 don't support this check anyway. */ 637 } 638 639 640 static inline void dma_unmap_tx(struct port *port, struct desc *desc) 641 { 642 #ifdef __ARMEB__ 643 dma_unmap_single(&port->netdev->dev, desc->data, 644 desc->buf_len, DMA_TO_DEVICE); 645 #else 646 dma_unmap_single(&port->netdev->dev, desc->data & ~3, 647 ALIGN((desc->data & 3) + desc->buf_len, 4), 648 DMA_TO_DEVICE); 649 #endif 650 } 651 652 653 static void eth_rx_irq(void *pdev) 654 { 655 struct net_device *dev = pdev; 656 struct port *port = netdev_priv(dev); 657 658 #if DEBUG_RX 659 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); 660 #endif 661 qmgr_disable_irq(port->plat->rxq); 662 napi_schedule(&port->napi); 663 } 664 665 static int eth_poll(struct napi_struct *napi, int budget) 666 { 667 struct port *port = container_of(napi, struct port, napi); 668 struct net_device *dev = port->netdev; 669 unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id); 670 int received = 0; 671 672 #if DEBUG_RX 673 printk(KERN_DEBUG "%s: eth_poll\n", dev->name); 674 #endif 675 676 while (received < budget) { 677 struct sk_buff *skb; 678 struct desc *desc; 679 int n; 680 #ifdef __ARMEB__ 681 struct sk_buff *temp; 682 u32 phys; 683 #endif 684 685 if ((n = queue_get_desc(rxq, port, 0)) < 0) { 686 #if DEBUG_RX 687 printk(KERN_DEBUG "%s: eth_poll napi_complete\n", 688 dev->name); 689 #endif 690 napi_complete(napi); 691 qmgr_enable_irq(rxq); 692 if (!qmgr_stat_below_low_watermark(rxq) && 693 napi_reschedule(napi)) { /* not empty again */ 694 #if DEBUG_RX 695 printk(KERN_DEBUG "%s: eth_poll" 696 " napi_reschedule successed\n", 697 dev->name); 698 #endif 699 qmgr_disable_irq(rxq); 700 continue; 701 } 702 #if DEBUG_RX 703 printk(KERN_DEBUG "%s: eth_poll all done\n", 704 dev->name); 705 #endif 706 return received; /* all work done */ 707 } 708 709 desc = rx_desc_ptr(port, n); 710 711 #ifdef __ARMEB__ 712 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { 713 phys = dma_map_single(&dev->dev, skb->data, 714 RX_BUFF_SIZE, DMA_FROM_DEVICE); 715 if (dma_mapping_error(&dev->dev, phys)) { 716 dev_kfree_skb(skb); 717 skb = NULL; 718 } 719 } 720 #else 721 skb = netdev_alloc_skb(dev, 722 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); 723 #endif 724 725 if (!skb) { 726 dev->stats.rx_dropped++; 727 /* put the desc back on RX-ready queue */ 728 desc->buf_len = MAX_MRU; 729 desc->pkt_len = 0; 730 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); 731 continue; 732 } 733 734 /* process received frame */ 735 #ifdef __ARMEB__ 736 temp = skb; 737 skb = port->rx_buff_tab[n]; 738 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, 739 RX_BUFF_SIZE, DMA_FROM_DEVICE); 740 #else 741 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN, 742 RX_BUFF_SIZE, DMA_FROM_DEVICE); 743 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], 744 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); 745 #endif 746 skb_reserve(skb, NET_IP_ALIGN); 747 skb_put(skb, desc->pkt_len); 748 749 debug_pkt(dev, "eth_poll", skb->data, skb->len); 750 751 ixp_rx_timestamp(port, skb); 752 skb->protocol = eth_type_trans(skb, dev); 753 dev->stats.rx_packets++; 754 dev->stats.rx_bytes += skb->len; 755 netif_receive_skb(skb); 756 757 /* put the new buffer on RX-free queue */ 758 #ifdef __ARMEB__ 759 port->rx_buff_tab[n] = temp; 760 desc->data = phys + NET_IP_ALIGN; 761 #endif 762 desc->buf_len = MAX_MRU; 763 desc->pkt_len = 0; 764 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); 765 received++; 766 } 767 768 #if DEBUG_RX 769 printk(KERN_DEBUG "eth_poll(): end, not all work done\n"); 770 #endif 771 return received; /* not all work done */ 772 } 773 774 775 static void eth_txdone_irq(void *unused) 776 { 777 u32 phys; 778 779 #if DEBUG_TX 780 printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n"); 781 #endif 782 while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) { 783 u32 npe_id, n_desc; 784 struct port *port; 785 struct desc *desc; 786 int start; 787 788 npe_id = phys & 3; 789 BUG_ON(npe_id >= MAX_NPES); 790 port = npe_port_tab[npe_id]; 791 BUG_ON(!port); 792 phys &= ~0x1F; /* mask out non-address bits */ 793 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); 794 BUG_ON(n_desc >= TX_DESCS); 795 desc = tx_desc_ptr(port, n_desc); 796 debug_desc(phys, desc); 797 798 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */ 799 port->netdev->stats.tx_packets++; 800 port->netdev->stats.tx_bytes += desc->pkt_len; 801 802 dma_unmap_tx(port, desc); 803 #if DEBUG_TX 804 printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n", 805 port->netdev->name, port->tx_buff_tab[n_desc]); 806 #endif 807 free_buffer_irq(port->tx_buff_tab[n_desc]); 808 port->tx_buff_tab[n_desc] = NULL; 809 } 810 811 start = qmgr_stat_below_low_watermark(port->plat->txreadyq); 812 queue_put_desc(port->plat->txreadyq, phys, desc); 813 if (start) { /* TX-ready queue was empty */ 814 #if DEBUG_TX 815 printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n", 816 port->netdev->name); 817 #endif 818 netif_wake_queue(port->netdev); 819 } 820 } 821 } 822 823 static int eth_xmit(struct sk_buff *skb, struct net_device *dev) 824 { 825 struct port *port = netdev_priv(dev); 826 unsigned int txreadyq = port->plat->txreadyq; 827 int len, offset, bytes, n; 828 void *mem; 829 u32 phys; 830 struct desc *desc; 831 832 #if DEBUG_TX 833 printk(KERN_DEBUG "%s: eth_xmit\n", dev->name); 834 #endif 835 836 if (unlikely(skb->len > MAX_MRU)) { 837 dev_kfree_skb(skb); 838 dev->stats.tx_errors++; 839 return NETDEV_TX_OK; 840 } 841 842 debug_pkt(dev, "eth_xmit", skb->data, skb->len); 843 844 len = skb->len; 845 #ifdef __ARMEB__ 846 offset = 0; /* no need to keep alignment */ 847 bytes = len; 848 mem = skb->data; 849 #else 850 offset = (int)skb->data & 3; /* keep 32-bit alignment */ 851 bytes = ALIGN(offset + len, 4); 852 if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { 853 dev_kfree_skb(skb); 854 dev->stats.tx_dropped++; 855 return NETDEV_TX_OK; 856 } 857 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); 858 #endif 859 860 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); 861 if (dma_mapping_error(&dev->dev, phys)) { 862 dev_kfree_skb(skb); 863 #ifndef __ARMEB__ 864 kfree(mem); 865 #endif 866 dev->stats.tx_dropped++; 867 return NETDEV_TX_OK; 868 } 869 870 n = queue_get_desc(txreadyq, port, 1); 871 BUG_ON(n < 0); 872 desc = tx_desc_ptr(port, n); 873 874 #ifdef __ARMEB__ 875 port->tx_buff_tab[n] = skb; 876 #else 877 port->tx_buff_tab[n] = mem; 878 #endif 879 desc->data = phys + offset; 880 desc->buf_len = desc->pkt_len = len; 881 882 /* NPE firmware pads short frames with zeros internally */ 883 wmb(); 884 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); 885 886 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ 887 #if DEBUG_TX 888 printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); 889 #endif 890 netif_stop_queue(dev); 891 /* we could miss TX ready interrupt */ 892 /* really empty in fact */ 893 if (!qmgr_stat_below_low_watermark(txreadyq)) { 894 #if DEBUG_TX 895 printk(KERN_DEBUG "%s: eth_xmit ready again\n", 896 dev->name); 897 #endif 898 netif_wake_queue(dev); 899 } 900 } 901 902 #if DEBUG_TX 903 printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name); 904 #endif 905 906 ixp_tx_timestamp(port, skb); 907 skb_tx_timestamp(skb); 908 909 #ifndef __ARMEB__ 910 dev_kfree_skb(skb); 911 #endif 912 return NETDEV_TX_OK; 913 } 914 915 916 static void eth_set_mcast_list(struct net_device *dev) 917 { 918 struct port *port = netdev_priv(dev); 919 struct netdev_hw_addr *ha; 920 u8 diffs[ETH_ALEN], *addr; 921 int i; 922 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 923 924 if (dev->flags & IFF_ALLMULTI) { 925 for (i = 0; i < ETH_ALEN; i++) { 926 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); 927 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); 928 } 929 __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN, 930 &port->regs->rx_control[0]); 931 return; 932 } 933 934 if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) { 935 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, 936 &port->regs->rx_control[0]); 937 return; 938 } 939 940 memset(diffs, 0, ETH_ALEN); 941 942 addr = NULL; 943 netdev_for_each_mc_addr(ha, dev) { 944 if (!addr) 945 addr = ha->addr; /* first MAC address */ 946 for (i = 0; i < ETH_ALEN; i++) 947 diffs[i] |= addr[i] ^ ha->addr[i]; 948 } 949 950 for (i = 0; i < ETH_ALEN; i++) { 951 __raw_writel(addr[i], &port->regs->mcast_addr[i]); 952 __raw_writel(~diffs[i], &port->regs->mcast_mask[i]); 953 } 954 955 __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN, 956 &port->regs->rx_control[0]); 957 } 958 959 960 static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 961 { 962 struct port *port = netdev_priv(dev); 963 964 if (!netif_running(dev)) 965 return -EINVAL; 966 967 if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP) 968 return hwtstamp_ioctl(dev, req, cmd); 969 970 return phy_mii_ioctl(port->phydev, req, cmd); 971 } 972 973 /* ethtool support */ 974 975 static void ixp4xx_get_drvinfo(struct net_device *dev, 976 struct ethtool_drvinfo *info) 977 { 978 struct port *port = netdev_priv(dev); 979 strcpy(info->driver, DRV_NAME); 980 snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u", 981 port->firmware[0], port->firmware[1], 982 port->firmware[2], port->firmware[3]); 983 strcpy(info->bus_info, "internal"); 984 } 985 986 static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 987 { 988 struct port *port = netdev_priv(dev); 989 return phy_ethtool_gset(port->phydev, cmd); 990 } 991 992 static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 993 { 994 struct port *port = netdev_priv(dev); 995 return phy_ethtool_sset(port->phydev, cmd); 996 } 997 998 static int ixp4xx_nway_reset(struct net_device *dev) 999 { 1000 struct port *port = netdev_priv(dev); 1001 return phy_start_aneg(port->phydev); 1002 } 1003 1004 static const struct ethtool_ops ixp4xx_ethtool_ops = { 1005 .get_drvinfo = ixp4xx_get_drvinfo, 1006 .get_settings = ixp4xx_get_settings, 1007 .set_settings = ixp4xx_set_settings, 1008 .nway_reset = ixp4xx_nway_reset, 1009 .get_link = ethtool_op_get_link, 1010 }; 1011 1012 1013 static int request_queues(struct port *port) 1014 { 1015 int err; 1016 1017 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, 1018 "%s:RX-free", port->netdev->name); 1019 if (err) 1020 return err; 1021 1022 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, 1023 "%s:RX", port->netdev->name); 1024 if (err) 1025 goto rel_rxfree; 1026 1027 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, 1028 "%s:TX", port->netdev->name); 1029 if (err) 1030 goto rel_rx; 1031 1032 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, 1033 "%s:TX-ready", port->netdev->name); 1034 if (err) 1035 goto rel_tx; 1036 1037 /* TX-done queue handles skbs sent out by the NPEs */ 1038 if (!ports_open) { 1039 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0, 1040 "%s:TX-done", DRV_NAME); 1041 if (err) 1042 goto rel_txready; 1043 } 1044 return 0; 1045 1046 rel_txready: 1047 qmgr_release_queue(port->plat->txreadyq); 1048 rel_tx: 1049 qmgr_release_queue(TX_QUEUE(port->id)); 1050 rel_rx: 1051 qmgr_release_queue(port->plat->rxq); 1052 rel_rxfree: 1053 qmgr_release_queue(RXFREE_QUEUE(port->id)); 1054 printk(KERN_DEBUG "%s: unable to request hardware queues\n", 1055 port->netdev->name); 1056 return err; 1057 } 1058 1059 static void release_queues(struct port *port) 1060 { 1061 qmgr_release_queue(RXFREE_QUEUE(port->id)); 1062 qmgr_release_queue(port->plat->rxq); 1063 qmgr_release_queue(TX_QUEUE(port->id)); 1064 qmgr_release_queue(port->plat->txreadyq); 1065 1066 if (!ports_open) 1067 qmgr_release_queue(TXDONE_QUEUE); 1068 } 1069 1070 static int init_queues(struct port *port) 1071 { 1072 int i; 1073 1074 if (!ports_open) 1075 if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, 1076 POOL_ALLOC_SIZE, 32, 0))) 1077 return -ENOMEM; 1078 1079 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, 1080 &port->desc_tab_phys))) 1081 return -ENOMEM; 1082 memset(port->desc_tab, 0, POOL_ALLOC_SIZE); 1083 memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ 1084 memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); 1085 1086 /* Setup RX buffers */ 1087 for (i = 0; i < RX_DESCS; i++) { 1088 struct desc *desc = rx_desc_ptr(port, i); 1089 buffer_t *buff; /* skb or kmalloc()ated memory */ 1090 void *data; 1091 #ifdef __ARMEB__ 1092 if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE))) 1093 return -ENOMEM; 1094 data = buff->data; 1095 #else 1096 if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL))) 1097 return -ENOMEM; 1098 data = buff; 1099 #endif 1100 desc->buf_len = MAX_MRU; 1101 desc->data = dma_map_single(&port->netdev->dev, data, 1102 RX_BUFF_SIZE, DMA_FROM_DEVICE); 1103 if (dma_mapping_error(&port->netdev->dev, desc->data)) { 1104 free_buffer(buff); 1105 return -EIO; 1106 } 1107 desc->data += NET_IP_ALIGN; 1108 port->rx_buff_tab[i] = buff; 1109 } 1110 1111 return 0; 1112 } 1113 1114 static void destroy_queues(struct port *port) 1115 { 1116 int i; 1117 1118 if (port->desc_tab) { 1119 for (i = 0; i < RX_DESCS; i++) { 1120 struct desc *desc = rx_desc_ptr(port, i); 1121 buffer_t *buff = port->rx_buff_tab[i]; 1122 if (buff) { 1123 dma_unmap_single(&port->netdev->dev, 1124 desc->data - NET_IP_ALIGN, 1125 RX_BUFF_SIZE, DMA_FROM_DEVICE); 1126 free_buffer(buff); 1127 } 1128 } 1129 for (i = 0; i < TX_DESCS; i++) { 1130 struct desc *desc = tx_desc_ptr(port, i); 1131 buffer_t *buff = port->tx_buff_tab[i]; 1132 if (buff) { 1133 dma_unmap_tx(port, desc); 1134 free_buffer(buff); 1135 } 1136 } 1137 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys); 1138 port->desc_tab = NULL; 1139 } 1140 1141 if (!ports_open && dma_pool) { 1142 dma_pool_destroy(dma_pool); 1143 dma_pool = NULL; 1144 } 1145 } 1146 1147 static int eth_open(struct net_device *dev) 1148 { 1149 struct port *port = netdev_priv(dev); 1150 struct npe *npe = port->npe; 1151 struct msg msg; 1152 int i, err; 1153 1154 if (!npe_running(npe)) { 1155 err = npe_load_firmware(npe, npe_name(npe), &dev->dev); 1156 if (err) 1157 return err; 1158 1159 if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) { 1160 printk(KERN_ERR "%s: %s not responding\n", dev->name, 1161 npe_name(npe)); 1162 return -EIO; 1163 } 1164 port->firmware[0] = msg.byte4; 1165 port->firmware[1] = msg.byte5; 1166 port->firmware[2] = msg.byte6; 1167 port->firmware[3] = msg.byte7; 1168 } 1169 1170 memset(&msg, 0, sizeof(msg)); 1171 msg.cmd = NPE_VLAN_SETRXQOSENTRY; 1172 msg.eth_id = port->id; 1173 msg.byte5 = port->plat->rxq | 0x80; 1174 msg.byte7 = port->plat->rxq << 4; 1175 for (i = 0; i < 8; i++) { 1176 msg.byte3 = i; 1177 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ")) 1178 return -EIO; 1179 } 1180 1181 msg.cmd = NPE_EDB_SETPORTADDRESS; 1182 msg.eth_id = PHYSICAL_ID(port->id); 1183 msg.byte2 = dev->dev_addr[0]; 1184 msg.byte3 = dev->dev_addr[1]; 1185 msg.byte4 = dev->dev_addr[2]; 1186 msg.byte5 = dev->dev_addr[3]; 1187 msg.byte6 = dev->dev_addr[4]; 1188 msg.byte7 = dev->dev_addr[5]; 1189 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC")) 1190 return -EIO; 1191 1192 memset(&msg, 0, sizeof(msg)); 1193 msg.cmd = NPE_FW_SETFIREWALLMODE; 1194 msg.eth_id = port->id; 1195 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) 1196 return -EIO; 1197 1198 if ((err = request_queues(port)) != 0) 1199 return err; 1200 1201 if ((err = init_queues(port)) != 0) { 1202 destroy_queues(port); 1203 release_queues(port); 1204 return err; 1205 } 1206 1207 port->speed = 0; /* force "link up" message */ 1208 phy_start(port->phydev); 1209 1210 for (i = 0; i < ETH_ALEN; i++) 1211 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); 1212 __raw_writel(0x08, &port->regs->random_seed); 1213 __raw_writel(0x12, &port->regs->partial_empty_threshold); 1214 __raw_writel(0x30, &port->regs->partial_full_threshold); 1215 __raw_writel(0x08, &port->regs->tx_start_bytes); 1216 __raw_writel(0x15, &port->regs->tx_deferral); 1217 __raw_writel(0x08, &port->regs->tx_2part_deferral[0]); 1218 __raw_writel(0x07, &port->regs->tx_2part_deferral[1]); 1219 __raw_writel(0x80, &port->regs->slot_time); 1220 __raw_writel(0x01, &port->regs->int_clock_threshold); 1221 1222 /* Populate queues with buffers, no failure after this point */ 1223 for (i = 0; i < TX_DESCS; i++) 1224 queue_put_desc(port->plat->txreadyq, 1225 tx_desc_phys(port, i), tx_desc_ptr(port, i)); 1226 1227 for (i = 0; i < RX_DESCS; i++) 1228 queue_put_desc(RXFREE_QUEUE(port->id), 1229 rx_desc_phys(port, i), rx_desc_ptr(port, i)); 1230 1231 __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]); 1232 __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]); 1233 __raw_writel(0, &port->regs->rx_control[1]); 1234 __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]); 1235 1236 napi_enable(&port->napi); 1237 eth_set_mcast_list(dev); 1238 netif_start_queue(dev); 1239 1240 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, 1241 eth_rx_irq, dev); 1242 if (!ports_open) { 1243 qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY, 1244 eth_txdone_irq, NULL); 1245 qmgr_enable_irq(TXDONE_QUEUE); 1246 } 1247 ports_open++; 1248 /* we may already have RX data, enables IRQ */ 1249 napi_schedule(&port->napi); 1250 return 0; 1251 } 1252 1253 static int eth_close(struct net_device *dev) 1254 { 1255 struct port *port = netdev_priv(dev); 1256 struct msg msg; 1257 int buffs = RX_DESCS; /* allocated RX buffers */ 1258 int i; 1259 1260 ports_open--; 1261 qmgr_disable_irq(port->plat->rxq); 1262 napi_disable(&port->napi); 1263 netif_stop_queue(dev); 1264 1265 while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0) 1266 buffs--; 1267 1268 memset(&msg, 0, sizeof(msg)); 1269 msg.cmd = NPE_SETLOOPBACK_MODE; 1270 msg.eth_id = port->id; 1271 msg.byte3 = 1; 1272 if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK")) 1273 printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name); 1274 1275 i = 0; 1276 do { /* drain RX buffers */ 1277 while (queue_get_desc(port->plat->rxq, port, 0) >= 0) 1278 buffs--; 1279 if (!buffs) 1280 break; 1281 if (qmgr_stat_empty(TX_QUEUE(port->id))) { 1282 /* we have to inject some packet */ 1283 struct desc *desc; 1284 u32 phys; 1285 int n = queue_get_desc(port->plat->txreadyq, port, 1); 1286 BUG_ON(n < 0); 1287 desc = tx_desc_ptr(port, n); 1288 phys = tx_desc_phys(port, n); 1289 desc->buf_len = desc->pkt_len = 1; 1290 wmb(); 1291 queue_put_desc(TX_QUEUE(port->id), phys, desc); 1292 } 1293 udelay(1); 1294 } while (++i < MAX_CLOSE_WAIT); 1295 1296 if (buffs) 1297 printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)" 1298 " left in NPE\n", dev->name, buffs); 1299 #if DEBUG_CLOSE 1300 if (!buffs) 1301 printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i); 1302 #endif 1303 1304 buffs = TX_DESCS; 1305 while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0) 1306 buffs--; /* cancel TX */ 1307 1308 i = 0; 1309 do { 1310 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0) 1311 buffs--; 1312 if (!buffs) 1313 break; 1314 } while (++i < MAX_CLOSE_WAIT); 1315 1316 if (buffs) 1317 printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) " 1318 "left in NPE\n", dev->name, buffs); 1319 #if DEBUG_CLOSE 1320 if (!buffs) 1321 printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i); 1322 #endif 1323 1324 msg.byte3 = 0; 1325 if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK")) 1326 printk(KERN_CRIT "%s: unable to disable loopback\n", 1327 dev->name); 1328 1329 phy_stop(port->phydev); 1330 1331 if (!ports_open) 1332 qmgr_disable_irq(TXDONE_QUEUE); 1333 destroy_queues(port); 1334 release_queues(port); 1335 return 0; 1336 } 1337 1338 static const struct net_device_ops ixp4xx_netdev_ops = { 1339 .ndo_open = eth_open, 1340 .ndo_stop = eth_close, 1341 .ndo_start_xmit = eth_xmit, 1342 .ndo_set_rx_mode = eth_set_mcast_list, 1343 .ndo_do_ioctl = eth_ioctl, 1344 .ndo_change_mtu = eth_change_mtu, 1345 .ndo_set_mac_address = eth_mac_addr, 1346 .ndo_validate_addr = eth_validate_addr, 1347 }; 1348 1349 static int __devinit eth_init_one(struct platform_device *pdev) 1350 { 1351 struct port *port; 1352 struct net_device *dev; 1353 struct eth_plat_info *plat = pdev->dev.platform_data; 1354 u32 regs_phys; 1355 char phy_id[MII_BUS_ID_SIZE + 3]; 1356 int err; 1357 1358 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { 1359 pr_err("ixp4xx_eth: bad ptp filter\n"); 1360 return -EINVAL; 1361 } 1362 1363 if (!(dev = alloc_etherdev(sizeof(struct port)))) 1364 return -ENOMEM; 1365 1366 SET_NETDEV_DEV(dev, &pdev->dev); 1367 port = netdev_priv(dev); 1368 port->netdev = dev; 1369 port->id = pdev->id; 1370 1371 switch (port->id) { 1372 case IXP4XX_ETH_NPEA: 1373 port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT; 1374 regs_phys = IXP4XX_EthA_BASE_PHYS; 1375 break; 1376 case IXP4XX_ETH_NPEB: 1377 port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; 1378 regs_phys = IXP4XX_EthB_BASE_PHYS; 1379 break; 1380 case IXP4XX_ETH_NPEC: 1381 port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; 1382 regs_phys = IXP4XX_EthC_BASE_PHYS; 1383 break; 1384 default: 1385 err = -ENODEV; 1386 goto err_free; 1387 } 1388 1389 dev->netdev_ops = &ixp4xx_netdev_ops; 1390 dev->ethtool_ops = &ixp4xx_ethtool_ops; 1391 dev->tx_queue_len = 100; 1392 1393 netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT); 1394 1395 if (!(port->npe = npe_request(NPE_ID(port->id)))) { 1396 err = -EIO; 1397 goto err_free; 1398 } 1399 1400 port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name); 1401 if (!port->mem_res) { 1402 err = -EBUSY; 1403 goto err_npe_rel; 1404 } 1405 1406 port->plat = plat; 1407 npe_port_tab[NPE_ID(port->id)] = port; 1408 memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN); 1409 1410 platform_set_drvdata(pdev, dev); 1411 1412 __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET, 1413 &port->regs->core_control); 1414 udelay(50); 1415 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); 1416 udelay(50); 1417 1418 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy); 1419 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0, 1420 PHY_INTERFACE_MODE_MII); 1421 if (IS_ERR(port->phydev)) { 1422 err = PTR_ERR(port->phydev); 1423 goto err_free_mem; 1424 } 1425 1426 port->phydev->irq = PHY_POLL; 1427 1428 if ((err = register_netdev(dev))) 1429 goto err_phy_dis; 1430 1431 printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy, 1432 npe_name(port->npe)); 1433 1434 return 0; 1435 1436 err_phy_dis: 1437 phy_disconnect(port->phydev); 1438 err_free_mem: 1439 npe_port_tab[NPE_ID(port->id)] = NULL; 1440 platform_set_drvdata(pdev, NULL); 1441 release_resource(port->mem_res); 1442 err_npe_rel: 1443 npe_release(port->npe); 1444 err_free: 1445 free_netdev(dev); 1446 return err; 1447 } 1448 1449 static int __devexit eth_remove_one(struct platform_device *pdev) 1450 { 1451 struct net_device *dev = platform_get_drvdata(pdev); 1452 struct port *port = netdev_priv(dev); 1453 1454 unregister_netdev(dev); 1455 phy_disconnect(port->phydev); 1456 npe_port_tab[NPE_ID(port->id)] = NULL; 1457 platform_set_drvdata(pdev, NULL); 1458 npe_release(port->npe); 1459 release_resource(port->mem_res); 1460 free_netdev(dev); 1461 return 0; 1462 } 1463 1464 static struct platform_driver ixp4xx_eth_driver = { 1465 .driver.name = DRV_NAME, 1466 .probe = eth_init_one, 1467 .remove = eth_remove_one, 1468 }; 1469 1470 static int __init eth_init_module(void) 1471 { 1472 int err; 1473 if ((err = ixp4xx_mdio_register())) 1474 return err; 1475 return platform_driver_register(&ixp4xx_eth_driver); 1476 } 1477 1478 static void __exit eth_cleanup_module(void) 1479 { 1480 platform_driver_unregister(&ixp4xx_eth_driver); 1481 ixp4xx_mdio_remove(); 1482 } 1483 1484 MODULE_AUTHOR("Krzysztof Halasa"); 1485 MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver"); 1486 MODULE_LICENSE("GPL v2"); 1487 MODULE_ALIAS("platform:ixp4xx_eth"); 1488 module_init(eth_init_module); 1489 module_exit(eth_cleanup_module); 1490