1 // SPDX-License-Identifier: GPL-2.0 2 /* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. 3 * 4 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle 5 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. 6 * 7 * References: 8 * o IOC3 ASIC specification 4.51, 1996-04-18 9 * o IEEE 802.3 specification, 2000 edition 10 * o DP38840A Specification, National Semiconductor, March 1997 11 * 12 * To do: 13 * 14 * o Use prefetching for large packets. What is a good lower limit for 15 * prefetching? 16 * o Use hardware checksums. 17 * o Convert to using a IOC3 meta driver. 18 * o Which PHYs might possibly be attached to the IOC3 in real live, 19 * which workarounds are required for them? Do we ever have Lucent's? 20 * o For the 2.5 branch kill the mii-tool ioctls. 21 */ 22 23 #define IOC3_NAME "ioc3-eth" 24 #define IOC3_VERSION "2.6.3-4" 25 26 #include <linux/delay.h> 27 #include <linux/kernel.h> 28 #include <linux/mm.h> 29 #include <linux/errno.h> 30 #include <linux/module.h> 31 #include <linux/pci.h> 32 #include <linux/crc32.h> 33 #include <linux/mii.h> 34 #include <linux/in.h> 35 #include <linux/io.h> 36 #include <linux/ip.h> 37 #include <linux/tcp.h> 38 #include <linux/udp.h> 39 #include <linux/gfp.h> 40 41 #ifdef CONFIG_SERIAL_8250 42 #include <linux/serial_core.h> 43 #include <linux/serial_8250.h> 44 #include <linux/serial_reg.h> 45 #endif 46 47 #include <linux/netdevice.h> 48 #include <linux/etherdevice.h> 49 #include <linux/ethtool.h> 50 #include <linux/skbuff.h> 51 #include <linux/dma-mapping.h> 52 53 #include <net/ip.h> 54 55 #include <asm/byteorder.h> 56 #include <asm/pgtable.h> 57 #include <linux/uaccess.h> 58 #include <asm/sn/types.h> 59 #include <asm/sn/ioc3.h> 60 #include <asm/pci/bridge.h> 61 62 /* Number of RX buffers. This is tunable in the range of 16 <= x < 512. 63 * The value must be a power of two. 64 */ 65 #define RX_BUFFS 64 66 #define RX_RING_ENTRIES 512 /* fixed in hardware */ 67 #define RX_RING_MASK (RX_RING_ENTRIES - 1) 68 #define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64)) 69 70 /* 128 TX buffers (not tunable) */ 71 #define TX_RING_ENTRIES 128 72 #define TX_RING_MASK (TX_RING_ENTRIES - 1) 73 #define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd)) 74 75 /* IOC3 does dma transfers in 128 byte blocks */ 76 #define IOC3_DMA_XFER_LEN 128UL 77 78 /* Every RX buffer starts with 8 byte descriptor data */ 79 #define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN) 80 #define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN) 81 82 #define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21) 83 #define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21) 84 85 /* Private per NIC data of the driver. */ 86 struct ioc3_private { 87 struct ioc3_ethregs *regs; 88 struct ioc3 *all_regs; 89 struct device *dma_dev; 90 u32 *ssram; 91 unsigned long *rxr; /* pointer to receiver ring */ 92 void *tx_ring; 93 struct ioc3_etxd *txr; 94 dma_addr_t rxr_dma; 95 dma_addr_t txr_dma; 96 struct sk_buff *rx_skbs[RX_RING_ENTRIES]; 97 struct sk_buff *tx_skbs[TX_RING_ENTRIES]; 98 int rx_ci; /* RX consumer index */ 99 int rx_pi; /* RX producer index */ 100 int tx_ci; /* TX consumer index */ 101 int tx_pi; /* TX producer index */ 102 int txqlen; 103 u32 emcr, ehar_h, ehar_l; 104 spinlock_t ioc3_lock; 105 struct mii_if_info mii; 106 107 struct net_device *dev; 108 struct pci_dev *pdev; 109 110 /* Members used by autonegotiation */ 111 struct timer_list ioc3_timer; 112 }; 113 114 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 115 static void ioc3_set_multicast_list(struct net_device *dev); 116 static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); 117 static void ioc3_timeout(struct net_device *dev); 118 static inline unsigned int ioc3_hash(const unsigned char *addr); 119 static void ioc3_start(struct ioc3_private *ip); 120 static inline void ioc3_stop(struct ioc3_private *ip); 121 static void ioc3_init(struct net_device *dev); 122 static int ioc3_alloc_rx_bufs(struct net_device *dev); 123 static void ioc3_free_rx_bufs(struct ioc3_private *ip); 124 static inline void ioc3_clean_tx_ring(struct ioc3_private *ip); 125 126 static const char ioc3_str[] = "IOC3 Ethernet"; 127 static const struct ethtool_ops ioc3_ethtool_ops; 128 129 130 static inline unsigned long aligned_rx_skb_addr(unsigned long addr) 131 { 132 return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL); 133 } 134 135 static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb, 136 struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma) 137 { 138 struct sk_buff *new_skb; 139 dma_addr_t d; 140 int offset; 141 142 new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC); 143 if (!new_skb) 144 return -ENOMEM; 145 146 /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */ 147 offset = aligned_rx_skb_addr((unsigned long)new_skb->data); 148 if (offset) 149 skb_reserve(new_skb, offset); 150 151 d = dma_map_single(ip->dma_dev, new_skb->data, 152 RX_BUF_SIZE, DMA_FROM_DEVICE); 153 154 if (dma_mapping_error(ip->dma_dev, d)) { 155 dev_kfree_skb_any(new_skb); 156 return -ENOMEM; 157 } 158 *rxb_dma = d; 159 *rxb = (struct ioc3_erxbuf *)new_skb->data; 160 skb_reserve(new_skb, RX_OFFSET); 161 *skb = new_skb; 162 163 return 0; 164 } 165 166 #ifdef CONFIG_PCI_XTALK_BRIDGE 167 static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) 168 { 169 return (addr & ~PCI64_ATTR_BAR) | attr; 170 } 171 172 #define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT) 173 #else 174 static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) 175 { 176 return addr; 177 } 178 179 #define ERBAR_VAL 0 180 #endif 181 182 #define IOC3_SIZE 0x100000 183 184 static inline u32 mcr_pack(u32 pulse, u32 sample) 185 { 186 return (pulse << 10) | (sample << 2); 187 } 188 189 static int nic_wait(u32 __iomem *mcr) 190 { 191 u32 m; 192 193 do { 194 m = readl(mcr); 195 } while (!(m & 2)); 196 197 return m & 1; 198 } 199 200 static int nic_reset(u32 __iomem *mcr) 201 { 202 int presence; 203 204 writel(mcr_pack(500, 65), mcr); 205 presence = nic_wait(mcr); 206 207 writel(mcr_pack(0, 500), mcr); 208 nic_wait(mcr); 209 210 return presence; 211 } 212 213 static inline int nic_read_bit(u32 __iomem *mcr) 214 { 215 int result; 216 217 writel(mcr_pack(6, 13), mcr); 218 result = nic_wait(mcr); 219 writel(mcr_pack(0, 100), mcr); 220 nic_wait(mcr); 221 222 return result; 223 } 224 225 static inline void nic_write_bit(u32 __iomem *mcr, int bit) 226 { 227 if (bit) 228 writel(mcr_pack(6, 110), mcr); 229 else 230 writel(mcr_pack(80, 30), mcr); 231 232 nic_wait(mcr); 233 } 234 235 /* Read a byte from an iButton device 236 */ 237 static u32 nic_read_byte(u32 __iomem *mcr) 238 { 239 u32 result = 0; 240 int i; 241 242 for (i = 0; i < 8; i++) 243 result = (result >> 1) | (nic_read_bit(mcr) << 7); 244 245 return result; 246 } 247 248 /* Write a byte to an iButton device 249 */ 250 static void nic_write_byte(u32 __iomem *mcr, int byte) 251 { 252 int i, bit; 253 254 for (i = 8; i; i--) { 255 bit = byte & 1; 256 byte >>= 1; 257 258 nic_write_bit(mcr, bit); 259 } 260 } 261 262 static u64 nic_find(u32 __iomem *mcr, int *last) 263 { 264 int a, b, index, disc; 265 u64 address = 0; 266 267 nic_reset(mcr); 268 /* Search ROM. */ 269 nic_write_byte(mcr, 0xf0); 270 271 /* Algorithm from ``Book of iButton Standards''. */ 272 for (index = 0, disc = 0; index < 64; index++) { 273 a = nic_read_bit(mcr); 274 b = nic_read_bit(mcr); 275 276 if (a && b) { 277 pr_warn("NIC search failed (not fatal).\n"); 278 *last = 0; 279 return 0; 280 } 281 282 if (!a && !b) { 283 if (index == *last) { 284 address |= 1UL << index; 285 } else if (index > *last) { 286 address &= ~(1UL << index); 287 disc = index; 288 } else if ((address & (1UL << index)) == 0) { 289 disc = index; 290 } 291 nic_write_bit(mcr, address & (1UL << index)); 292 continue; 293 } else { 294 if (a) 295 address |= 1UL << index; 296 else 297 address &= ~(1UL << index); 298 nic_write_bit(mcr, a); 299 continue; 300 } 301 } 302 303 *last = disc; 304 305 return address; 306 } 307 308 static int nic_init(u32 __iomem *mcr) 309 { 310 const char *unknown = "unknown"; 311 const char *type = unknown; 312 u8 crc; 313 u8 serial[6]; 314 int save = 0, i; 315 316 while (1) { 317 u64 reg; 318 319 reg = nic_find(mcr, &save); 320 321 switch (reg & 0xff) { 322 case 0x91: 323 type = "DS1981U"; 324 break; 325 default: 326 if (save == 0) { 327 /* Let the caller try again. */ 328 return -1; 329 } 330 continue; 331 } 332 333 nic_reset(mcr); 334 335 /* Match ROM. */ 336 nic_write_byte(mcr, 0x55); 337 for (i = 0; i < 8; i++) 338 nic_write_byte(mcr, (reg >> (i << 3)) & 0xff); 339 340 reg >>= 8; /* Shift out type. */ 341 for (i = 0; i < 6; i++) { 342 serial[i] = reg & 0xff; 343 reg >>= 8; 344 } 345 crc = reg & 0xff; 346 break; 347 } 348 349 pr_info("Found %s NIC", type); 350 if (type != unknown) 351 pr_cont(" registration number %pM, CRC %02x", serial, crc); 352 pr_cont(".\n"); 353 354 return 0; 355 } 356 357 /* Read the NIC (Number-In-a-Can) device used to store the MAC address on 358 * SN0 / SN00 nodeboards and PCI cards. 359 */ 360 static void ioc3_get_eaddr_nic(struct ioc3_private *ip) 361 { 362 u32 __iomem *mcr = &ip->all_regs->mcr; 363 int tries = 2; /* There may be some problem with the battery? */ 364 u8 nic[14]; 365 int i; 366 367 writel(1 << 21, &ip->all_regs->gpcr_s); 368 369 while (tries--) { 370 if (!nic_init(mcr)) 371 break; 372 udelay(500); 373 } 374 375 if (tries < 0) { 376 pr_err("Failed to read MAC address\n"); 377 return; 378 } 379 380 /* Read Memory. */ 381 nic_write_byte(mcr, 0xf0); 382 nic_write_byte(mcr, 0x00); 383 nic_write_byte(mcr, 0x00); 384 385 for (i = 13; i >= 0; i--) 386 nic[i] = nic_read_byte(mcr); 387 388 for (i = 2; i < 8; i++) 389 ip->dev->dev_addr[i - 2] = nic[i]; 390 } 391 392 /* Ok, this is hosed by design. It's necessary to know what machine the 393 * NIC is in in order to know how to read the NIC address. We also have 394 * to know if it's a PCI card or a NIC in on the node board ... 395 */ 396 static void ioc3_get_eaddr(struct ioc3_private *ip) 397 { 398 ioc3_get_eaddr_nic(ip); 399 400 pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr); 401 } 402 403 static void __ioc3_set_mac_address(struct net_device *dev) 404 { 405 struct ioc3_private *ip = netdev_priv(dev); 406 407 writel((dev->dev_addr[5] << 8) | 408 dev->dev_addr[4], 409 &ip->regs->emar_h); 410 writel((dev->dev_addr[3] << 24) | 411 (dev->dev_addr[2] << 16) | 412 (dev->dev_addr[1] << 8) | 413 dev->dev_addr[0], 414 &ip->regs->emar_l); 415 } 416 417 static int ioc3_set_mac_address(struct net_device *dev, void *addr) 418 { 419 struct ioc3_private *ip = netdev_priv(dev); 420 struct sockaddr *sa = addr; 421 422 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); 423 424 spin_lock_irq(&ip->ioc3_lock); 425 __ioc3_set_mac_address(dev); 426 spin_unlock_irq(&ip->ioc3_lock); 427 428 return 0; 429 } 430 431 /* Caller must hold the ioc3_lock ever for MII readers. This is also 432 * used to protect the transmitter side but it's low contention. 433 */ 434 static int ioc3_mdio_read(struct net_device *dev, int phy, int reg) 435 { 436 struct ioc3_private *ip = netdev_priv(dev); 437 struct ioc3_ethregs *regs = ip->regs; 438 439 while (readl(®s->micr) & MICR_BUSY) 440 ; 441 writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG, 442 ®s->micr); 443 while (readl(®s->micr) & MICR_BUSY) 444 ; 445 446 return readl(®s->midr_r) & MIDR_DATA_MASK; 447 } 448 449 static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data) 450 { 451 struct ioc3_private *ip = netdev_priv(dev); 452 struct ioc3_ethregs *regs = ip->regs; 453 454 while (readl(®s->micr) & MICR_BUSY) 455 ; 456 writel(data, ®s->midr_w); 457 writel((phy << MICR_PHYADDR_SHIFT) | reg, ®s->micr); 458 while (readl(®s->micr) & MICR_BUSY) 459 ; 460 } 461 462 static int ioc3_mii_init(struct ioc3_private *ip); 463 464 static struct net_device_stats *ioc3_get_stats(struct net_device *dev) 465 { 466 struct ioc3_private *ip = netdev_priv(dev); 467 struct ioc3_ethregs *regs = ip->regs; 468 469 dev->stats.collisions += readl(®s->etcdc) & ETCDC_COLLCNT_MASK; 470 return &dev->stats; 471 } 472 473 static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len) 474 { 475 struct ethhdr *eh = eth_hdr(skb); 476 unsigned int proto; 477 unsigned char *cp; 478 struct iphdr *ih; 479 u32 csum, ehsum; 480 u16 *ew; 481 482 /* Did hardware handle the checksum at all? The cases we can handle 483 * are: 484 * 485 * - TCP and UDP checksums of IPv4 only. 486 * - IPv6 would be doable but we keep that for later ... 487 * - Only unfragmented packets. Did somebody already tell you 488 * fragmentation is evil? 489 * - don't care about packet size. Worst case when processing a 490 * malformed packet we'll try to access the packet at ip header + 491 * 64 bytes which is still inside the skb. Even in the unlikely 492 * case where the checksum is right the higher layers will still 493 * drop the packet as appropriate. 494 */ 495 if (eh->h_proto != htons(ETH_P_IP)) 496 return; 497 498 ih = (struct iphdr *)((char *)eh + ETH_HLEN); 499 if (ip_is_fragment(ih)) 500 return; 501 502 proto = ih->protocol; 503 if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) 504 return; 505 506 /* Same as tx - compute csum of pseudo header */ 507 csum = hwsum + 508 (ih->tot_len - (ih->ihl << 2)) + 509 htons((u16)ih->protocol) + 510 (ih->saddr >> 16) + (ih->saddr & 0xffff) + 511 (ih->daddr >> 16) + (ih->daddr & 0xffff); 512 513 /* Sum up ethernet dest addr, src addr and protocol */ 514 ew = (u16 *)eh; 515 ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6]; 516 517 ehsum = (ehsum & 0xffff) + (ehsum >> 16); 518 ehsum = (ehsum & 0xffff) + (ehsum >> 16); 519 520 csum += 0xffff ^ ehsum; 521 522 /* In the next step we also subtract the 1's complement 523 * checksum of the trailing ethernet CRC. 524 */ 525 cp = (char *)eh + len; /* points at trailing CRC */ 526 if (len & 1) { 527 csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]); 528 csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]); 529 } else { 530 csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]); 531 csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]); 532 } 533 534 csum = (csum & 0xffff) + (csum >> 16); 535 csum = (csum & 0xffff) + (csum >> 16); 536 537 if (csum == 0xffff) 538 skb->ip_summed = CHECKSUM_UNNECESSARY; 539 } 540 541 static inline void ioc3_rx(struct net_device *dev) 542 { 543 struct ioc3_private *ip = netdev_priv(dev); 544 struct sk_buff *skb, *new_skb; 545 int rx_entry, n_entry, len; 546 struct ioc3_erxbuf *rxb; 547 unsigned long *rxr; 548 dma_addr_t d; 549 u32 w0, err; 550 551 rxr = ip->rxr; /* Ring base */ 552 rx_entry = ip->rx_ci; /* RX consume index */ 553 n_entry = ip->rx_pi; 554 555 skb = ip->rx_skbs[rx_entry]; 556 rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); 557 w0 = be32_to_cpu(rxb->w0); 558 559 while (w0 & ERXBUF_V) { 560 err = be32_to_cpu(rxb->err); /* It's valid ... */ 561 if (err & ERXBUF_GOODPKT) { 562 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; 563 skb_put(skb, len); 564 skb->protocol = eth_type_trans(skb, dev); 565 566 if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) { 567 /* Ouch, drop packet and just recycle packet 568 * to keep the ring filled. 569 */ 570 dev->stats.rx_dropped++; 571 new_skb = skb; 572 d = rxr[rx_entry]; 573 goto next; 574 } 575 576 if (likely(dev->features & NETIF_F_RXCSUM)) 577 ioc3_tcpudp_checksum(skb, 578 w0 & ERXBUF_IPCKSUM_MASK, 579 len); 580 581 dma_unmap_single(ip->dma_dev, rxr[rx_entry], 582 RX_BUF_SIZE, DMA_FROM_DEVICE); 583 584 netif_rx(skb); 585 586 ip->rx_skbs[rx_entry] = NULL; /* Poison */ 587 588 dev->stats.rx_packets++; /* Statistics */ 589 dev->stats.rx_bytes += len; 590 } else { 591 /* The frame is invalid and the skb never 592 * reached the network layer so we can just 593 * recycle it. 594 */ 595 new_skb = skb; 596 d = rxr[rx_entry]; 597 dev->stats.rx_errors++; 598 } 599 if (err & ERXBUF_CRCERR) /* Statistics */ 600 dev->stats.rx_crc_errors++; 601 if (err & ERXBUF_FRAMERR) 602 dev->stats.rx_frame_errors++; 603 604 next: 605 ip->rx_skbs[n_entry] = new_skb; 606 rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); 607 rxb->w0 = 0; /* Clear valid flag */ 608 n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */ 609 610 /* Now go on to the next ring entry. */ 611 rx_entry = (rx_entry + 1) & RX_RING_MASK; 612 skb = ip->rx_skbs[rx_entry]; 613 rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); 614 w0 = be32_to_cpu(rxb->w0); 615 } 616 writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir); 617 ip->rx_pi = n_entry; 618 ip->rx_ci = rx_entry; 619 } 620 621 static inline void ioc3_tx(struct net_device *dev) 622 { 623 struct ioc3_private *ip = netdev_priv(dev); 624 struct ioc3_ethregs *regs = ip->regs; 625 unsigned long packets, bytes; 626 int tx_entry, o_entry; 627 struct sk_buff *skb; 628 u32 etcir; 629 630 spin_lock(&ip->ioc3_lock); 631 etcir = readl(®s->etcir); 632 633 tx_entry = (etcir >> 7) & TX_RING_MASK; 634 o_entry = ip->tx_ci; 635 packets = 0; 636 bytes = 0; 637 638 while (o_entry != tx_entry) { 639 packets++; 640 skb = ip->tx_skbs[o_entry]; 641 bytes += skb->len; 642 dev_consume_skb_irq(skb); 643 ip->tx_skbs[o_entry] = NULL; 644 645 o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */ 646 647 etcir = readl(®s->etcir); /* More pkts sent? */ 648 tx_entry = (etcir >> 7) & TX_RING_MASK; 649 } 650 651 dev->stats.tx_packets += packets; 652 dev->stats.tx_bytes += bytes; 653 ip->txqlen -= packets; 654 655 if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES) 656 netif_wake_queue(dev); 657 658 ip->tx_ci = o_entry; 659 spin_unlock(&ip->ioc3_lock); 660 } 661 662 /* Deal with fatal IOC3 errors. This condition might be caused by a hard or 663 * software problems, so we should try to recover 664 * more gracefully if this ever happens. In theory we might be flooded 665 * with such error interrupts if something really goes wrong, so we might 666 * also consider to take the interface down. 667 */ 668 static void ioc3_error(struct net_device *dev, u32 eisr) 669 { 670 struct ioc3_private *ip = netdev_priv(dev); 671 672 spin_lock(&ip->ioc3_lock); 673 674 if (eisr & EISR_RXOFLO) 675 net_err_ratelimited("%s: RX overflow.\n", dev->name); 676 if (eisr & EISR_RXBUFOFLO) 677 net_err_ratelimited("%s: RX buffer overflow.\n", dev->name); 678 if (eisr & EISR_RXMEMERR) 679 net_err_ratelimited("%s: RX PCI error.\n", dev->name); 680 if (eisr & EISR_RXPARERR) 681 net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name); 682 if (eisr & EISR_TXBUFUFLO) 683 net_err_ratelimited("%s: TX buffer underflow.\n", dev->name); 684 if (eisr & EISR_TXMEMERR) 685 net_err_ratelimited("%s: TX PCI error.\n", dev->name); 686 687 ioc3_stop(ip); 688 ioc3_free_rx_bufs(ip); 689 ioc3_clean_tx_ring(ip); 690 691 ioc3_init(dev); 692 if (ioc3_alloc_rx_bufs(dev)) { 693 netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); 694 spin_unlock(&ip->ioc3_lock); 695 return; 696 } 697 ioc3_start(ip); 698 ioc3_mii_init(ip); 699 700 netif_wake_queue(dev); 701 702 spin_unlock(&ip->ioc3_lock); 703 } 704 705 /* The interrupt handler does all of the Rx thread work and cleans up 706 * after the Tx thread. 707 */ 708 static irqreturn_t ioc3_interrupt(int irq, void *dev_id) 709 { 710 struct ioc3_private *ip = netdev_priv(dev_id); 711 struct ioc3_ethregs *regs = ip->regs; 712 u32 eisr; 713 714 eisr = readl(®s->eisr); 715 writel(eisr, ®s->eisr); 716 readl(®s->eisr); /* Flush */ 717 718 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | 719 EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) 720 ioc3_error(dev_id, eisr); 721 if (eisr & EISR_RXTIMERINT) 722 ioc3_rx(dev_id); 723 if (eisr & EISR_TXEXPLICIT) 724 ioc3_tx(dev_id); 725 726 return IRQ_HANDLED; 727 } 728 729 static inline void ioc3_setup_duplex(struct ioc3_private *ip) 730 { 731 struct ioc3_ethregs *regs = ip->regs; 732 733 spin_lock_irq(&ip->ioc3_lock); 734 735 if (ip->mii.full_duplex) { 736 writel(ETCSR_FD, ®s->etcsr); 737 ip->emcr |= EMCR_DUPLEX; 738 } else { 739 writel(ETCSR_HD, ®s->etcsr); 740 ip->emcr &= ~EMCR_DUPLEX; 741 } 742 writel(ip->emcr, ®s->emcr); 743 744 spin_unlock_irq(&ip->ioc3_lock); 745 } 746 747 static void ioc3_timer(struct timer_list *t) 748 { 749 struct ioc3_private *ip = from_timer(ip, t, ioc3_timer); 750 751 /* Print the link status if it has changed */ 752 mii_check_media(&ip->mii, 1, 0); 753 ioc3_setup_duplex(ip); 754 755 ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */ 756 add_timer(&ip->ioc3_timer); 757 } 758 759 /* Try to find a PHY. There is no apparent relation between the MII addresses 760 * in the SGI documentation and what we find in reality, so we simply probe 761 * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my 762 * onboard IOC3s has the special oddity that probing doesn't seem to find it 763 * yet the interface seems to work fine, so if probing fails we for now will 764 * simply default to PHY 31 instead of bailing out. 765 */ 766 static int ioc3_mii_init(struct ioc3_private *ip) 767 { 768 int ioc3_phy_workaround = 1; 769 int i, found = 0, res = 0; 770 u16 word; 771 772 for (i = 0; i < 32; i++) { 773 word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1); 774 775 if (word != 0xffff && word != 0x0000) { 776 found = 1; 777 break; /* Found a PHY */ 778 } 779 } 780 781 if (!found) { 782 if (ioc3_phy_workaround) { 783 i = 31; 784 } else { 785 ip->mii.phy_id = -1; 786 res = -ENODEV; 787 goto out; 788 } 789 } 790 791 ip->mii.phy_id = i; 792 793 out: 794 return res; 795 } 796 797 static void ioc3_mii_start(struct ioc3_private *ip) 798 { 799 ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */ 800 add_timer(&ip->ioc3_timer); 801 } 802 803 static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry) 804 { 805 struct ioc3_etxd *desc; 806 u32 cmd, bufcnt, len; 807 808 desc = &ip->txr[entry]; 809 cmd = be32_to_cpu(desc->cmd); 810 bufcnt = be32_to_cpu(desc->bufcnt); 811 if (cmd & ETXD_B1V) { 812 len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT; 813 dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1), 814 len, DMA_TO_DEVICE); 815 } 816 if (cmd & ETXD_B2V) { 817 len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT; 818 dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2), 819 len, DMA_TO_DEVICE); 820 } 821 } 822 823 static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) 824 { 825 struct sk_buff *skb; 826 int i; 827 828 for (i = 0; i < TX_RING_ENTRIES; i++) { 829 skb = ip->tx_skbs[i]; 830 if (skb) { 831 ioc3_tx_unmap(ip, i); 832 ip->tx_skbs[i] = NULL; 833 dev_kfree_skb_any(skb); 834 } 835 ip->txr[i].cmd = 0; 836 } 837 ip->tx_pi = 0; 838 ip->tx_ci = 0; 839 } 840 841 static void ioc3_free_rx_bufs(struct ioc3_private *ip) 842 { 843 int rx_entry, n_entry; 844 struct sk_buff *skb; 845 846 n_entry = ip->rx_ci; 847 rx_entry = ip->rx_pi; 848 849 while (n_entry != rx_entry) { 850 skb = ip->rx_skbs[n_entry]; 851 if (skb) { 852 dma_unmap_single(ip->dma_dev, 853 be64_to_cpu(ip->rxr[n_entry]), 854 RX_BUF_SIZE, DMA_FROM_DEVICE); 855 dev_kfree_skb_any(skb); 856 } 857 n_entry = (n_entry + 1) & RX_RING_MASK; 858 } 859 } 860 861 static int ioc3_alloc_rx_bufs(struct net_device *dev) 862 { 863 struct ioc3_private *ip = netdev_priv(dev); 864 struct ioc3_erxbuf *rxb; 865 dma_addr_t d; 866 int i; 867 868 /* Now the rx buffers. The RX ring may be larger but 869 * we only allocate 16 buffers for now. Need to tune 870 * this for performance and memory later. 871 */ 872 for (i = 0; i < RX_BUFFS; i++) { 873 if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d)) 874 return -ENOMEM; 875 876 rxb->w0 = 0; /* Clear valid flag */ 877 ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); 878 } 879 ip->rx_ci = 0; 880 ip->rx_pi = RX_BUFFS; 881 882 return 0; 883 } 884 885 static inline void ioc3_ssram_disc(struct ioc3_private *ip) 886 { 887 struct ioc3_ethregs *regs = ip->regs; 888 u32 *ssram0 = &ip->ssram[0x0000]; 889 u32 *ssram1 = &ip->ssram[0x4000]; 890 u32 pattern = 0x5555; 891 892 /* Assume the larger size SSRAM and enable parity checking */ 893 writel(readl(®s->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), ®s->emcr); 894 readl(®s->emcr); /* Flush */ 895 896 writel(pattern, ssram0); 897 writel(~pattern & IOC3_SSRAM_DM, ssram1); 898 899 if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern || 900 (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { 901 /* set ssram size to 64 KB */ 902 ip->emcr |= EMCR_RAMPAR; 903 writel(readl(®s->emcr) & ~EMCR_BUFSIZ, ®s->emcr); 904 } else { 905 ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR; 906 } 907 } 908 909 static void ioc3_init(struct net_device *dev) 910 { 911 struct ioc3_private *ip = netdev_priv(dev); 912 struct ioc3_ethregs *regs = ip->regs; 913 914 del_timer_sync(&ip->ioc3_timer); /* Kill if running */ 915 916 writel(EMCR_RST, ®s->emcr); /* Reset */ 917 readl(®s->emcr); /* Flush WB */ 918 udelay(4); /* Give it time ... */ 919 writel(0, ®s->emcr); 920 readl(®s->emcr); 921 922 /* Misc registers */ 923 writel(ERBAR_VAL, ®s->erbar); 924 readl(®s->etcdc); /* Clear on read */ 925 writel(15, ®s->ercsr); /* RX low watermark */ 926 writel(0, ®s->ertr); /* Interrupt immediately */ 927 __ioc3_set_mac_address(dev); 928 writel(ip->ehar_h, ®s->ehar_h); 929 writel(ip->ehar_l, ®s->ehar_l); 930 writel(42, ®s->ersr); /* XXX should be random */ 931 } 932 933 static void ioc3_start(struct ioc3_private *ip) 934 { 935 struct ioc3_ethregs *regs = ip->regs; 936 unsigned long ring; 937 938 /* Now the rx ring base, consume & produce registers. */ 939 ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC); 940 writel(ring >> 32, ®s->erbr_h); 941 writel(ring & 0xffffffff, ®s->erbr_l); 942 writel(ip->rx_ci << 3, ®s->ercir); 943 writel((ip->rx_pi << 3) | ERPIR_ARM, ®s->erpir); 944 945 ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC); 946 947 ip->txqlen = 0; /* nothing queued */ 948 949 /* Now the tx ring base, consume & produce registers. */ 950 writel(ring >> 32, ®s->etbr_h); 951 writel(ring & 0xffffffff, ®s->etbr_l); 952 writel(ip->tx_pi << 7, ®s->etpir); 953 writel(ip->tx_ci << 7, ®s->etcir); 954 readl(®s->etcir); /* Flush */ 955 956 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | 957 EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; 958 writel(ip->emcr, ®s->emcr); 959 writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | 960 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | 961 EISR_TXEXPLICIT | EISR_TXMEMERR, ®s->eier); 962 readl(®s->eier); 963 } 964 965 static inline void ioc3_stop(struct ioc3_private *ip) 966 { 967 struct ioc3_ethregs *regs = ip->regs; 968 969 writel(0, ®s->emcr); /* Shutup */ 970 writel(0, ®s->eier); /* Disable interrupts */ 971 readl(®s->eier); /* Flush */ 972 } 973 974 static int ioc3_open(struct net_device *dev) 975 { 976 struct ioc3_private *ip = netdev_priv(dev); 977 978 if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) { 979 netdev_err(dev, "Can't get irq %d\n", dev->irq); 980 981 return -EAGAIN; 982 } 983 984 ip->ehar_h = 0; 985 ip->ehar_l = 0; 986 987 ioc3_init(dev); 988 if (ioc3_alloc_rx_bufs(dev)) { 989 netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); 990 return -ENOMEM; 991 } 992 ioc3_start(ip); 993 ioc3_mii_start(ip); 994 995 netif_start_queue(dev); 996 return 0; 997 } 998 999 static int ioc3_close(struct net_device *dev) 1000 { 1001 struct ioc3_private *ip = netdev_priv(dev); 1002 1003 del_timer_sync(&ip->ioc3_timer); 1004 1005 netif_stop_queue(dev); 1006 1007 ioc3_stop(ip); 1008 free_irq(dev->irq, dev); 1009 1010 ioc3_free_rx_bufs(ip); 1011 ioc3_clean_tx_ring(ip); 1012 1013 return 0; 1014 } 1015 1016 /* MENET cards have four IOC3 chips, which are attached to two sets of 1017 * PCI slot resources each: the primary connections are on slots 1018 * 0..3 and the secondaries are on 4..7 1019 * 1020 * All four ethernets are brought out to connectors; six serial ports 1021 * (a pair from each of the first three IOC3s) are brought out to 1022 * MiniDINs; all other subdevices are left swinging in the wind, leave 1023 * them disabled. 1024 */ 1025 1026 static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot) 1027 { 1028 struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0)); 1029 int ret = 0; 1030 1031 if (dev) { 1032 if (dev->vendor == PCI_VENDOR_ID_SGI && 1033 dev->device == PCI_DEVICE_ID_SGI_IOC3) 1034 ret = 1; 1035 pci_dev_put(dev); 1036 } 1037 1038 return ret; 1039 } 1040 1041 static int ioc3_is_menet(struct pci_dev *pdev) 1042 { 1043 return !pdev->bus->parent && 1044 ioc3_adjacent_is_ioc3(pdev, 0) && 1045 ioc3_adjacent_is_ioc3(pdev, 1) && 1046 ioc3_adjacent_is_ioc3(pdev, 2); 1047 } 1048 1049 #ifdef CONFIG_SERIAL_8250 1050 /* Note about serial ports and consoles: 1051 * For console output, everyone uses the IOC3 UARTA (offset 0x178) 1052 * connected to the master node (look in ip27_setup_console() and 1053 * ip27prom_console_write()). 1054 * 1055 * For serial (/dev/ttyS0 etc), we can not have hardcoded serial port 1056 * addresses on a partitioned machine. Since we currently use the ioc3 1057 * serial ports, we use dynamic serial port discovery that the serial.c 1058 * driver uses for pci/pnp ports (there is an entry for the SGI ioc3 1059 * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater 1060 * than UARTB's, although UARTA on o200s has traditionally been known as 1061 * port 0. So, we just use one serial port from each ioc3 (since the 1062 * serial driver adds addresses to get to higher ports). 1063 * 1064 * The first one to do a register_console becomes the preferred console 1065 * (if there is no kernel command line console= directive). /dev/console 1066 * (ie 5, 1) is then "aliased" into the device number returned by the 1067 * "device" routine referred to in this console structure 1068 * (ip27prom_console_dev). 1069 * 1070 * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working 1071 * around ioc3 oddities in this respect. 1072 * 1073 * The IOC3 serials use a 22MHz clock rate with an additional divider which 1074 * can be programmed in the SCR register if the DLAB bit is set. 1075 * 1076 * Register to interrupt zero because we share the interrupt with 1077 * the serial driver which we don't properly support yet. 1078 * 1079 * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been 1080 * registered. 1081 */ 1082 static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart) 1083 { 1084 #define COSMISC_CONSTANT 6 1085 1086 struct uart_8250_port port = { 1087 .port = { 1088 .irq = 0, 1089 .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, 1090 .iotype = UPIO_MEM, 1091 .regshift = 0, 1092 .uartclk = (22000000 << 1) / COSMISC_CONSTANT, 1093 1094 .membase = (unsigned char __iomem *)uart, 1095 .mapbase = (unsigned long)uart, 1096 } 1097 }; 1098 unsigned char lcr; 1099 1100 lcr = readb(&uart->iu_lcr); 1101 writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr); 1102 writeb(COSMISC_CONSTANT, &uart->iu_scr); 1103 writeb(lcr, &uart->iu_lcr); 1104 readb(&uart->iu_lcr); 1105 serial8250_register_8250_port(&port); 1106 } 1107 1108 static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) 1109 { 1110 u32 sio_iec; 1111 1112 /* We need to recognice and treat the fourth MENET serial as it 1113 * does not have an SuperIO chip attached to it, therefore attempting 1114 * to access it will result in bus errors. We call something an 1115 * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3 1116 * in it. This is paranoid but we want to avoid blowing up on a 1117 * showhorn PCI box that happens to have 4 IOC3 cards in it so it's 1118 * not paranoid enough ... 1119 */ 1120 if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3) 1121 return; 1122 1123 /* Switch IOC3 to PIO mode. It probably already was but let's be 1124 * paranoid 1125 */ 1126 writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s); 1127 readl(&ioc3->gpcr_s); 1128 writel(0, &ioc3->gppr[6]); 1129 readl(&ioc3->gppr[6]); 1130 writel(0, &ioc3->gppr[7]); 1131 readl(&ioc3->gppr[7]); 1132 writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr); 1133 readl(&ioc3->port_a.sscr); 1134 writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr); 1135 readl(&ioc3->port_b.sscr); 1136 /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */ 1137 sio_iec = readl(&ioc3->sio_iec); 1138 sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | 1139 SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | 1140 SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | 1141 SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR); 1142 sio_iec |= SIO_IR_SA_INT; 1143 sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | 1144 SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | 1145 SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | 1146 SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR); 1147 sio_iec |= SIO_IR_SB_INT; 1148 writel(sio_iec, &ioc3->sio_iec); 1149 writel(0, &ioc3->port_a.sscr); 1150 writel(0, &ioc3->port_b.sscr); 1151 1152 ioc3_8250_register(&ioc3->sregs.uarta); 1153 ioc3_8250_register(&ioc3->sregs.uartb); 1154 } 1155 #endif 1156 1157 static const struct net_device_ops ioc3_netdev_ops = { 1158 .ndo_open = ioc3_open, 1159 .ndo_stop = ioc3_close, 1160 .ndo_start_xmit = ioc3_start_xmit, 1161 .ndo_tx_timeout = ioc3_timeout, 1162 .ndo_get_stats = ioc3_get_stats, 1163 .ndo_set_rx_mode = ioc3_set_multicast_list, 1164 .ndo_do_ioctl = ioc3_ioctl, 1165 .ndo_validate_addr = eth_validate_addr, 1166 .ndo_set_mac_address = ioc3_set_mac_address, 1167 }; 1168 1169 static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1170 { 1171 unsigned int sw_physid1, sw_physid2; 1172 struct net_device *dev = NULL; 1173 struct ioc3_private *ip; 1174 struct ioc3 *ioc3; 1175 unsigned long ioc3_base, ioc3_size; 1176 u32 vendor, model, rev; 1177 int err; 1178 1179 /* Configure DMA attributes. */ 1180 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1181 if (err) { 1182 pr_err("%s: No usable DMA configuration, aborting.\n", 1183 pci_name(pdev)); 1184 goto out; 1185 } 1186 1187 if (pci_enable_device(pdev)) 1188 return -ENODEV; 1189 1190 dev = alloc_etherdev(sizeof(struct ioc3_private)); 1191 if (!dev) { 1192 err = -ENOMEM; 1193 goto out_disable; 1194 } 1195 1196 err = pci_request_regions(pdev, "ioc3"); 1197 if (err) 1198 goto out_free; 1199 1200 SET_NETDEV_DEV(dev, &pdev->dev); 1201 1202 ip = netdev_priv(dev); 1203 ip->dev = dev; 1204 ip->dma_dev = &pdev->dev; 1205 1206 dev->irq = pdev->irq; 1207 1208 ioc3_base = pci_resource_start(pdev, 0); 1209 ioc3_size = pci_resource_len(pdev, 0); 1210 ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size); 1211 if (!ioc3) { 1212 pr_err("ioc3eth(%s): ioremap failed, goodbye.\n", 1213 pci_name(pdev)); 1214 err = -ENOMEM; 1215 goto out_res; 1216 } 1217 ip->regs = &ioc3->eth; 1218 ip->ssram = ioc3->ssram; 1219 ip->all_regs = ioc3; 1220 1221 #ifdef CONFIG_SERIAL_8250 1222 ioc3_serial_probe(pdev, ioc3); 1223 #endif 1224 1225 spin_lock_init(&ip->ioc3_lock); 1226 timer_setup(&ip->ioc3_timer, ioc3_timer, 0); 1227 1228 ioc3_stop(ip); 1229 1230 /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */ 1231 ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma, 1232 GFP_KERNEL); 1233 if (!ip->rxr) { 1234 pr_err("ioc3-eth: rx ring allocation failed\n"); 1235 err = -ENOMEM; 1236 goto out_stop; 1237 } 1238 1239 /* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */ 1240 ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, 1241 &ip->txr_dma, GFP_KERNEL); 1242 if (!ip->tx_ring) { 1243 pr_err("ioc3-eth: tx ring allocation failed\n"); 1244 err = -ENOMEM; 1245 goto out_stop; 1246 } 1247 /* Align TX ring */ 1248 ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K); 1249 ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K); 1250 1251 ioc3_init(dev); 1252 1253 ip->pdev = pdev; 1254 1255 ip->mii.phy_id_mask = 0x1f; 1256 ip->mii.reg_num_mask = 0x1f; 1257 ip->mii.dev = dev; 1258 ip->mii.mdio_read = ioc3_mdio_read; 1259 ip->mii.mdio_write = ioc3_mdio_write; 1260 1261 ioc3_mii_init(ip); 1262 1263 if (ip->mii.phy_id == -1) { 1264 pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n", 1265 pci_name(pdev)); 1266 err = -ENODEV; 1267 goto out_stop; 1268 } 1269 1270 ioc3_mii_start(ip); 1271 ioc3_ssram_disc(ip); 1272 ioc3_get_eaddr(ip); 1273 1274 /* The IOC3-specific entries in the device structure. */ 1275 dev->watchdog_timeo = 5 * HZ; 1276 dev->netdev_ops = &ioc3_netdev_ops; 1277 dev->ethtool_ops = &ioc3_ethtool_ops; 1278 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 1279 dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA; 1280 1281 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); 1282 sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); 1283 1284 err = register_netdev(dev); 1285 if (err) 1286 goto out_stop; 1287 1288 mii_check_media(&ip->mii, 1, 1); 1289 ioc3_setup_duplex(ip); 1290 1291 vendor = (sw_physid1 << 12) | (sw_physid2 >> 4); 1292 model = (sw_physid2 >> 4) & 0x3f; 1293 rev = sw_physid2 & 0xf; 1294 netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n", 1295 ip->mii.phy_id, vendor, model, rev); 1296 netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n", 1297 ip->emcr & EMCR_BUFSIZ ? 128 : 64); 1298 1299 return 0; 1300 1301 out_stop: 1302 del_timer_sync(&ip->ioc3_timer); 1303 if (ip->rxr) 1304 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, 1305 ip->rxr_dma); 1306 if (ip->tx_ring) 1307 dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, 1308 ip->txr_dma); 1309 out_res: 1310 pci_release_regions(pdev); 1311 out_free: 1312 free_netdev(dev); 1313 out_disable: 1314 /* We should call pci_disable_device(pdev); here if the IOC3 wasn't 1315 * such a weird device ... 1316 */ 1317 out: 1318 return err; 1319 } 1320 1321 static void ioc3_remove_one(struct pci_dev *pdev) 1322 { 1323 struct net_device *dev = pci_get_drvdata(pdev); 1324 struct ioc3_private *ip = netdev_priv(dev); 1325 1326 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); 1327 dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma); 1328 1329 unregister_netdev(dev); 1330 del_timer_sync(&ip->ioc3_timer); 1331 1332 iounmap(ip->all_regs); 1333 pci_release_regions(pdev); 1334 free_netdev(dev); 1335 /* We should call pci_disable_device(pdev); here if the IOC3 wasn't 1336 * such a weird device ... 1337 */ 1338 } 1339 1340 static const struct pci_device_id ioc3_pci_tbl[] = { 1341 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID }, 1342 { 0 } 1343 }; 1344 MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl); 1345 1346 static struct pci_driver ioc3_driver = { 1347 .name = "ioc3-eth", 1348 .id_table = ioc3_pci_tbl, 1349 .probe = ioc3_probe, 1350 .remove = ioc3_remove_one, 1351 }; 1352 1353 static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) 1354 { 1355 struct ioc3_private *ip = netdev_priv(dev); 1356 struct ioc3_etxd *desc; 1357 unsigned long data; 1358 unsigned int len; 1359 int produce; 1360 u32 w0 = 0; 1361 1362 /* IOC3 has a fairly simple minded checksumming hardware which simply 1363 * adds up the 1's complement checksum for the entire packet and 1364 * inserts it at an offset which can be specified in the descriptor 1365 * into the transmit packet. This means we have to compensate for the 1366 * MAC header which should not be summed and the TCP/UDP pseudo headers 1367 * manually. 1368 */ 1369 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1370 const struct iphdr *ih = ip_hdr(skb); 1371 const int proto = ntohs(ih->protocol); 1372 unsigned int csoff; 1373 u32 csum, ehsum; 1374 u16 *eh; 1375 1376 /* The MAC header. skb->mac seem the logic approach 1377 * to find the MAC header - except it's a NULL pointer ... 1378 */ 1379 eh = (u16 *)skb->data; 1380 1381 /* Sum up dest addr, src addr and protocol */ 1382 ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6]; 1383 1384 /* Skip IP header; it's sum is always zero and was 1385 * already filled in by ip_output.c 1386 */ 1387 csum = csum_tcpudp_nofold(ih->saddr, ih->daddr, 1388 ih->tot_len - (ih->ihl << 2), 1389 proto, csum_fold(ehsum)); 1390 1391 csum = (csum & 0xffff) + (csum >> 16); /* Fold again */ 1392 csum = (csum & 0xffff) + (csum >> 16); 1393 1394 csoff = ETH_HLEN + (ih->ihl << 2); 1395 if (proto == IPPROTO_UDP) { 1396 csoff += offsetof(struct udphdr, check); 1397 udp_hdr(skb)->check = csum; 1398 } 1399 if (proto == IPPROTO_TCP) { 1400 csoff += offsetof(struct tcphdr, check); 1401 tcp_hdr(skb)->check = csum; 1402 } 1403 1404 w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); 1405 } 1406 1407 spin_lock_irq(&ip->ioc3_lock); 1408 1409 data = (unsigned long)skb->data; 1410 len = skb->len; 1411 1412 produce = ip->tx_pi; 1413 desc = &ip->txr[produce]; 1414 1415 if (len <= 104) { 1416 /* Short packet, let's copy it directly into the ring. */ 1417 skb_copy_from_linear_data(skb, desc->data, skb->len); 1418 if (len < ETH_ZLEN) { 1419 /* Very short packet, pad with zeros at the end. */ 1420 memset(desc->data + len, 0, ETH_ZLEN - len); 1421 len = ETH_ZLEN; 1422 } 1423 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0); 1424 desc->bufcnt = cpu_to_be32(len); 1425 } else if ((data ^ (data + len - 1)) & 0x4000) { 1426 unsigned long b2 = (data | 0x3fffUL) + 1UL; 1427 unsigned long s1 = b2 - data; 1428 unsigned long s2 = data + len - b2; 1429 dma_addr_t d1, d2; 1430 1431 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | 1432 ETXD_B1V | ETXD_B2V | w0); 1433 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | 1434 (s2 << ETXD_B2CNT_SHIFT)); 1435 d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE); 1436 if (dma_mapping_error(ip->dma_dev, d1)) 1437 goto drop_packet; 1438 d2 = dma_map_single(ip->dma_dev, (void *)b2, s1, DMA_TO_DEVICE); 1439 if (dma_mapping_error(ip->dma_dev, d2)) { 1440 dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE); 1441 goto drop_packet; 1442 } 1443 desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF)); 1444 desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF)); 1445 } else { 1446 dma_addr_t d; 1447 1448 /* Normal sized packet that doesn't cross a page boundary. */ 1449 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); 1450 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); 1451 d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE); 1452 if (dma_mapping_error(ip->dma_dev, d)) 1453 goto drop_packet; 1454 desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF)); 1455 } 1456 1457 mb(); /* make sure all descriptor changes are visible */ 1458 1459 ip->tx_skbs[produce] = skb; /* Remember skb */ 1460 produce = (produce + 1) & TX_RING_MASK; 1461 ip->tx_pi = produce; 1462 writel(produce << 7, &ip->regs->etpir); /* Fire ... */ 1463 1464 ip->txqlen++; 1465 1466 if (ip->txqlen >= (TX_RING_ENTRIES - 1)) 1467 netif_stop_queue(dev); 1468 1469 spin_unlock_irq(&ip->ioc3_lock); 1470 1471 return NETDEV_TX_OK; 1472 1473 drop_packet: 1474 dev_kfree_skb_any(skb); 1475 dev->stats.tx_dropped++; 1476 1477 spin_unlock_irq(&ip->ioc3_lock); 1478 1479 return NETDEV_TX_OK; 1480 } 1481 1482 static void ioc3_timeout(struct net_device *dev) 1483 { 1484 struct ioc3_private *ip = netdev_priv(dev); 1485 1486 netdev_err(dev, "transmit timed out, resetting\n"); 1487 1488 spin_lock_irq(&ip->ioc3_lock); 1489 1490 ioc3_stop(ip); 1491 ioc3_free_rx_bufs(ip); 1492 ioc3_clean_tx_ring(ip); 1493 1494 ioc3_init(dev); 1495 if (ioc3_alloc_rx_bufs(dev)) { 1496 netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); 1497 spin_unlock_irq(&ip->ioc3_lock); 1498 return; 1499 } 1500 ioc3_start(ip); 1501 ioc3_mii_init(ip); 1502 ioc3_mii_start(ip); 1503 1504 spin_unlock_irq(&ip->ioc3_lock); 1505 1506 netif_wake_queue(dev); 1507 } 1508 1509 /* Given a multicast ethernet address, this routine calculates the 1510 * address's bit index in the logical address filter mask 1511 */ 1512 static inline unsigned int ioc3_hash(const unsigned char *addr) 1513 { 1514 unsigned int temp = 0; 1515 int bits; 1516 u32 crc; 1517 1518 crc = ether_crc_le(ETH_ALEN, addr); 1519 1520 crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */ 1521 for (bits = 6; --bits >= 0; ) { 1522 temp <<= 1; 1523 temp |= (crc & 0x1); 1524 crc >>= 1; 1525 } 1526 1527 return temp; 1528 } 1529 1530 static void ioc3_get_drvinfo(struct net_device *dev, 1531 struct ethtool_drvinfo *info) 1532 { 1533 struct ioc3_private *ip = netdev_priv(dev); 1534 1535 strlcpy(info->driver, IOC3_NAME, sizeof(info->driver)); 1536 strlcpy(info->version, IOC3_VERSION, sizeof(info->version)); 1537 strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info)); 1538 } 1539 1540 static int ioc3_get_link_ksettings(struct net_device *dev, 1541 struct ethtool_link_ksettings *cmd) 1542 { 1543 struct ioc3_private *ip = netdev_priv(dev); 1544 1545 spin_lock_irq(&ip->ioc3_lock); 1546 mii_ethtool_get_link_ksettings(&ip->mii, cmd); 1547 spin_unlock_irq(&ip->ioc3_lock); 1548 1549 return 0; 1550 } 1551 1552 static int ioc3_set_link_ksettings(struct net_device *dev, 1553 const struct ethtool_link_ksettings *cmd) 1554 { 1555 struct ioc3_private *ip = netdev_priv(dev); 1556 int rc; 1557 1558 spin_lock_irq(&ip->ioc3_lock); 1559 rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd); 1560 spin_unlock_irq(&ip->ioc3_lock); 1561 1562 return rc; 1563 } 1564 1565 static int ioc3_nway_reset(struct net_device *dev) 1566 { 1567 struct ioc3_private *ip = netdev_priv(dev); 1568 int rc; 1569 1570 spin_lock_irq(&ip->ioc3_lock); 1571 rc = mii_nway_restart(&ip->mii); 1572 spin_unlock_irq(&ip->ioc3_lock); 1573 1574 return rc; 1575 } 1576 1577 static u32 ioc3_get_link(struct net_device *dev) 1578 { 1579 struct ioc3_private *ip = netdev_priv(dev); 1580 int rc; 1581 1582 spin_lock_irq(&ip->ioc3_lock); 1583 rc = mii_link_ok(&ip->mii); 1584 spin_unlock_irq(&ip->ioc3_lock); 1585 1586 return rc; 1587 } 1588 1589 static const struct ethtool_ops ioc3_ethtool_ops = { 1590 .get_drvinfo = ioc3_get_drvinfo, 1591 .nway_reset = ioc3_nway_reset, 1592 .get_link = ioc3_get_link, 1593 .get_link_ksettings = ioc3_get_link_ksettings, 1594 .set_link_ksettings = ioc3_set_link_ksettings, 1595 }; 1596 1597 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1598 { 1599 struct ioc3_private *ip = netdev_priv(dev); 1600 int rc; 1601 1602 spin_lock_irq(&ip->ioc3_lock); 1603 rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL); 1604 spin_unlock_irq(&ip->ioc3_lock); 1605 1606 return rc; 1607 } 1608 1609 static void ioc3_set_multicast_list(struct net_device *dev) 1610 { 1611 struct ioc3_private *ip = netdev_priv(dev); 1612 struct ioc3_ethregs *regs = ip->regs; 1613 struct netdev_hw_addr *ha; 1614 u64 ehar = 0; 1615 1616 spin_lock_irq(&ip->ioc3_lock); 1617 1618 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1619 ip->emcr |= EMCR_PROMISC; 1620 writel(ip->emcr, ®s->emcr); 1621 readl(®s->emcr); 1622 } else { 1623 ip->emcr &= ~EMCR_PROMISC; 1624 writel(ip->emcr, ®s->emcr); /* Clear promiscuous. */ 1625 readl(®s->emcr); 1626 1627 if ((dev->flags & IFF_ALLMULTI) || 1628 (netdev_mc_count(dev) > 64)) { 1629 /* Too many for hashing to make sense or we want all 1630 * multicast packets anyway, so skip computing all the 1631 * hashes and just accept all packets. 1632 */ 1633 ip->ehar_h = 0xffffffff; 1634 ip->ehar_l = 0xffffffff; 1635 } else { 1636 netdev_for_each_mc_addr(ha, dev) { 1637 ehar |= (1UL << ioc3_hash(ha->addr)); 1638 } 1639 ip->ehar_h = ehar >> 32; 1640 ip->ehar_l = ehar & 0xffffffff; 1641 } 1642 writel(ip->ehar_h, ®s->ehar_h); 1643 writel(ip->ehar_l, ®s->ehar_l); 1644 } 1645 1646 spin_unlock_irq(&ip->ioc3_lock); 1647 } 1648 1649 module_pci_driver(ioc3_driver); 1650 MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>"); 1651 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); 1652 MODULE_LICENSE("GPL"); 1653