/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
 *
 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
 *
 * References:
 *  o IOC3 ASIC specification 4.51, 1996-04-18
 *  o IEEE 802.3 specification, 2000 edition
 *  o DP83840A Specification, National Semiconductor, March 1997
 *
 * To do:
 *
 *  o Handle allocation failures in ioc3_alloc_skb() more gracefully.
 *  o Handle allocation failures in ioc3_init_rings().
 *  o Use prefetching for large packets.  What is a good lower limit for
 *    prefetching?
 *  o We're probably allocating a bit too much memory.
 *  o Use hardware checksums.
 *  o Convert to using an IOC3 meta driver.
 *  o Which PHYs might possibly be attached to the IOC3 in real life,
 *    and which workarounds are required for them?  Do we ever have Lucent's?
 *  o For the 2.5 branch kill the mii-tool ioctls.
 */

#define IOC3_NAME	"ioc3-eth"
#define IOC3_VERSION	"2.6.3-4"

#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#ifdef CONFIG_SERIAL_8250
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#endif

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <net/ip.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>

/*
 * 64 RX buffers.  This is tunable in the range of 16 <= x < 512.  The
 * value must be a power of two.
 */
#define RX_BUFFS 64

#define ETCSR_FD	((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD	((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)

/* Private per-NIC data of the driver.  */
struct ioc3_private {
	struct ioc3 *regs;
	unsigned long *rxr;		/* pointer to receiver ring */
	struct ioc3_etxd *txr;
	struct sk_buff *rx_skbs[512];
	struct sk_buff *tx_skbs[128];
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;
	u32 emcr, ehar_h, ehar_l;
	spinlock_t ioc3_lock;
	struct mii_if_info mii;

	struct pci_dev *pdev;

	/* Members used by autonegotiation  */
	struct timer_list ioc3_timer;
};

static inline struct net_device *priv_netdev(struct ioc3_private *dev)
{
	return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
}

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);

static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;

/* We use this to acquire receive skb's that we can DMA directly into. */

#define IOC3_CACHELINE	128UL

static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
	return (~addr + 1) & (IOC3_CACHELINE - 1UL);
}

static inline struct sk_buff *ioc3_alloc_skb(unsigned long length,
	unsigned int gfp_mask)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
	if (likely(skb)) {
		int offset = aligned_rx_skb_addr((unsigned long)skb->data);
		if (offset)
			skb_reserve(skb, offset);
	}

	return skb;
}

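/*
 * A worked example of the alignment trick above (illustrative only):
 * aligned_rx_skb_addr() returns how many bytes short of the next
 * IOC3_CACHELINE boundary an address is, since (~addr + 1) == -addr.
 * For skb->data == 0x...0f90: -0x0f90 & 0x7f == 0x70, and reserving
 * 0x70 bytes moves skb->data to the 128-byte aligned 0x...1000, which
 * is what we want for a receive DMA target.
 */
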
static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
{
#ifdef CONFIG_SGI_IP27
	vdev <<= 57;		/* Shift to PCI64_ATTR_VIRTUAL */

	return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
	       ((unsigned long)ptr & TO_PHYS_MASK);
#else
	return virt_to_bus(ptr);
#endif
}

/* BEWARE: The IOC3 documentation gives the size of rx buffers as
   1644 while it's actually 1664.  This one was nasty to track down ...  */
#define RX_OFFSET		10
#define RX_BUF_ALLOC_SIZE	(1664 + RX_OFFSET + IOC3_CACHELINE)

/* DMA barrier to separate cached and uncached accesses.  */
#define BARRIER()							\
	__asm__("sync" ::: "memory")


#define IOC3_SIZE 0x100000

/*
 * IOC3 is a big endian device.
 *
 * Unorthodox, but it makes the use of these macros more readable - the
 * pointer to the IOC3's memory mapped registers is expected as
 * struct ioc3 * ioc3 in the environment.
 */
#define ioc3_r_mcr()		be32_to_cpu(ioc3->mcr)
#define ioc3_w_mcr(v)		do { ioc3->mcr = cpu_to_be32(v); } while (0)
#define ioc3_w_gpcr_s(v)	do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
#define ioc3_r_emcr()		be32_to_cpu(ioc3->emcr)
#define ioc3_w_emcr(v)		do { ioc3->emcr = cpu_to_be32(v); } while (0)
#define ioc3_r_eisr()		be32_to_cpu(ioc3->eisr)
#define ioc3_w_eisr(v)		do { ioc3->eisr = cpu_to_be32(v); } while (0)
#define ioc3_r_eier()		be32_to_cpu(ioc3->eier)
#define ioc3_w_eier(v)		do { ioc3->eier = cpu_to_be32(v); } while (0)
#define ioc3_r_ercsr()		be32_to_cpu(ioc3->ercsr)
#define ioc3_w_ercsr(v)		do { ioc3->ercsr = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_h()		be32_to_cpu(ioc3->erbr_h)
#define ioc3_w_erbr_h(v)	do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_l()		be32_to_cpu(ioc3->erbr_l)
#define ioc3_w_erbr_l(v)	do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_erbar()		be32_to_cpu(ioc3->erbar)
#define ioc3_w_erbar(v)		do { ioc3->erbar = cpu_to_be32(v); } while (0)
#define ioc3_r_ercir()		be32_to_cpu(ioc3->ercir)
#define ioc3_w_ercir(v)		do { ioc3->ercir = cpu_to_be32(v); } while (0)
#define ioc3_r_erpir()		be32_to_cpu(ioc3->erpir)
#define ioc3_w_erpir(v)		do { ioc3->erpir = cpu_to_be32(v); } while (0)
#define ioc3_r_ertr()		be32_to_cpu(ioc3->ertr)
#define ioc3_w_ertr(v)		do { ioc3->ertr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcsr()		be32_to_cpu(ioc3->etcsr)
#define ioc3_w_etcsr(v)		do { ioc3->etcsr = cpu_to_be32(v); } while (0)
#define ioc3_r_ersr()		be32_to_cpu(ioc3->ersr)
#define ioc3_w_ersr(v)		do { ioc3->ersr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcdc()		be32_to_cpu(ioc3->etcdc)
#define ioc3_w_etcdc(v)		do { ioc3->etcdc = cpu_to_be32(v); } while (0)
#define ioc3_r_ebir()		be32_to_cpu(ioc3->ebir)
#define ioc3_w_ebir(v)		do { ioc3->ebir = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_h()		be32_to_cpu(ioc3->etbr_h)
#define ioc3_w_etbr_h(v)	do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_l()		be32_to_cpu(ioc3->etbr_l)
#define ioc3_w_etbr_l(v)	do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_etcir()		be32_to_cpu(ioc3->etcir)
#define ioc3_w_etcir(v)		do { ioc3->etcir = cpu_to_be32(v); } while (0)
#define ioc3_r_etpir()		be32_to_cpu(ioc3->etpir)
#define ioc3_w_etpir(v)		do { ioc3->etpir = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_h()		be32_to_cpu(ioc3->emar_h)
#define ioc3_w_emar_h(v)	do { ioc3->emar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_l()		be32_to_cpu(ioc3->emar_l)
#define ioc3_w_emar_l(v)	do { ioc3->emar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_h()		be32_to_cpu(ioc3->ehar_h)
#define ioc3_w_ehar_h(v)	do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_l()		be32_to_cpu(ioc3->ehar_l)
#define ioc3_w_ehar_l(v)	do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_micr()		be32_to_cpu(ioc3->micr)
#define ioc3_w_micr(v)		do { ioc3->micr = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_r()		be32_to_cpu(ioc3->midr_r)
#define ioc3_w_midr_r(v)	do { ioc3->midr_r = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_w()		be32_to_cpu(ioc3->midr_w)
#define ioc3_w_midr_w(v)	do { ioc3->midr_w = cpu_to_be32(v); } while (0)

static inline u32 mcr_pack(u32 pulse, u32 sample)
{
	return (pulse << 10) | (sample << 2);
}

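/*
 * A rough sketch of the one-wire/MCR handshake used below (inferred
 * from mcr_pack() and nic_wait(), not from the spec, so treat the
 * field layout as an assumption): mcr_pack() places the low-pulse
 * time in the upper field (<< 10) and the sample time in the lower
 * field (<< 2); on readback, bit 1 acts as a "transaction done" flag
 * and bit 0 is the sampled line level.  nic_reset(), nic_read_bit()
 * and nic_write_bit() are just different pulse/sample pairs on top
 * of this primitive.
 */
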
static int nic_wait(struct ioc3 *ioc3)
{
	u32 mcr;

	do {
		mcr = ioc3_r_mcr();
	} while (!(mcr & 2));

	return mcr & 1;
}

static int nic_reset(struct ioc3 *ioc3)
{
	int presence;

	ioc3_w_mcr(mcr_pack(500, 65));
	presence = nic_wait(ioc3);

	ioc3_w_mcr(mcr_pack(0, 500));
	nic_wait(ioc3);

	return presence;
}

static inline int nic_read_bit(struct ioc3 *ioc3)
{
	int result;

	ioc3_w_mcr(mcr_pack(6, 13));
	result = nic_wait(ioc3);
	ioc3_w_mcr(mcr_pack(0, 100));
	nic_wait(ioc3);

	return result;
}

static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
{
	if (bit)
		ioc3_w_mcr(mcr_pack(6, 110));
	else
		ioc3_w_mcr(mcr_pack(80, 30));

	nic_wait(ioc3);
}

/*
 * Read a byte from an iButton device
 */
static u32 nic_read_byte(struct ioc3 *ioc3)
{
	u32 result = 0;
	int i;

	for (i = 0; i < 8; i++)
		result = (result >> 1) | (nic_read_bit(ioc3) << 7);

	return result;
}

/*
 * Write a byte to an iButton device
 */
static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i, bit;

	for (i = 8; i; i--) {
		bit = byte & 1;
		byte >>= 1;

		nic_write_bit(ioc3, bit);
	}
}

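/*
 * nic_find() below is the classic Dallas/Maxim 1-Wire "Search ROM"
 * walk.  For each of the 64 address bits every device on the bus
 * answers with (bit, complement-of-bit), so:
 *
 *   a == 1, b == 1: nobody answered - the search failed;
 *   a != b:         all remaining devices agree on this bit - take a;
 *   a == 0, b == 0: devices disagree - a discrepancy; remember the
 *                   position in *last so the next call can explore
 *                   the other branch.
 *
 * Writing the chosen bit back deselects every device that differs in
 * that position.
 */
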
static u64 nic_find(struct ioc3 *ioc3, int *last)
{
	int a, b, index, disc;
	u64 address = 0;

	nic_reset(ioc3);
	/* Search ROM.  */
	nic_write_byte(ioc3, 0xf0);

	/* Algorithm from ``Book of iButton Standards''.  */
	for (index = 0, disc = 0; index < 64; index++) {
		a = nic_read_bit(ioc3);
		b = nic_read_bit(ioc3);

		if (a && b) {
			printk("NIC search failed (not fatal).\n");
			*last = 0;
			return 0;
		}

		if (!a && !b) {
			if (index == *last) {
				address |= 1UL << index;
			} else if (index > *last) {
				address &= ~(1UL << index);
				disc = index;
			} else if ((address & (1UL << index)) == 0)
				disc = index;
			nic_write_bit(ioc3, address & (1UL << index));
			continue;
		} else {
			if (a)
				address |= 1UL << index;
			else
				address &= ~(1UL << index);
			nic_write_bit(ioc3, a);
			continue;
		}
	}

	*last = disc;

	return address;
}

static int nic_init(struct ioc3 *ioc3)
{
	const char *unknown = "unknown";
	const char *type = unknown;
	u8 crc;
	u8 serial[6];
	int save = 0, i;

	while (1) {
		u64 reg;
		reg = nic_find(ioc3, &save);

		switch (reg & 0xff) {
		case 0x91:
			type = "DS1981U";
			break;
		default:
			if (save == 0) {
				/* Let the caller try again.  */
				return -1;
			}
			continue;
		}

		nic_reset(ioc3);

		/* Match ROM.  */
		nic_write_byte(ioc3, 0x55);
		for (i = 0; i < 8; i++)
			nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);

		reg >>= 8;	/* Shift out type.  */
		for (i = 0; i < 6; i++) {
			serial[i] = reg & 0xff;
			reg >>= 8;
		}
		crc = reg & 0xff;
		break;
	}

	printk("Found %s NIC", type);
	if (type != unknown)
		printk(" registration number %pM, CRC %02x", serial, crc);
	printk(".\n");

	return 0;
}

/*
 * Read the NIC (Number-In-a-Can) device used to store the MAC address on
 * SN0 / SN00 nodeboards and PCI cards.
 */
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	u8 nic[14];
	int tries = 2;	/* There may be some problem with the battery?  */
	int i;

	ioc3_w_gpcr_s(1 << 21);

	while (tries--) {
		if (!nic_init(ioc3))
			break;
		udelay(500);
	}

	if (tries < 0) {
		printk("Failed to read MAC address\n");
		return;
	}

	/* Read Memory.  */
	nic_write_byte(ioc3, 0xf0);
	nic_write_byte(ioc3, 0x00);
	nic_write_byte(ioc3, 0x00);

	for (i = 13; i >= 0; i--)
		nic[i] = nic_read_byte(ioc3);

	for (i = 2; i < 8; i++)
		priv_netdev(ip)->dev_addr[i - 2] = nic[i];
}

/*
 * Ok, this is hosed by design.  It's necessary to know what machine the
 * NIC is in, in order to know how to read the NIC address.  We also have
 * to know whether it's a PCI card or a NIC on the node board ...
 */
static void ioc3_get_eaddr(struct ioc3_private *ip)
{
	ioc3_get_eaddr_nic(ip);

	printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
}

static void __ioc3_set_mac_address(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
	ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
	              (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
}

static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&ip->ioc3_lock);
	__ioc3_set_mac_address(dev);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}

/*
 * Caller must hold the ioc3_lock, even for MII readers.  The lock is also
 * used to protect the transmitter side but contention there is low.
 */
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	while (ioc3_r_micr() & MICR_BUSY);
	ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
	while (ioc3_r_micr() & MICR_BUSY);

	return ioc3_r_midr_r() & MIDR_DATA_MASK;
}

static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	while (ioc3_r_micr() & MICR_BUSY);
	ioc3_w_midr_w(data);
	ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
	while (ioc3_r_micr() & MICR_BUSY);
}

static int ioc3_mii_init(struct ioc3_private *ip);

static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
	return &dev->stats;
}

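/*
 * ioc3_tcpudp_checksum() below leans on two standard 1's complement
 * identities: adding (0xffff ^ x) is the same as subtracting x, and
 * "csum = (csum & 0xffff) + (csum >> 16)" folds carries back into the
 * low 16 bits.  The hardware sums the whole frame, MAC header and
 * trailing CRC included, so both get subtracted again before the
 * result is compared against the expected pseudo header sum.
 */
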
static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
{
	struct ethhdr *eh = eth_hdr(skb);
	uint32_t csum, ehsum;
	unsigned int proto;
	struct iphdr *ih;
	uint16_t *ew;
	unsigned char *cp;

	/*
	 * Did hardware handle the checksum at all?  The cases we can handle
	 * are:
	 *
	 * - TCP and UDP checksums of IPv4 only.
	 * - IPv6 would be doable but we keep that for later ...
	 * - Only unfragmented packets.  Did somebody already tell you
	 *   fragmentation is evil?
	 * - We don't care about packet size.  Worst case, when processing a
	 *   malformed packet we'll try to access the packet at IP header +
	 *   64 bytes, which is still inside the skb.  Even in the unlikely
	 *   case where the checksum is right the higher layers will still
	 *   drop the packet as appropriate.
	 */
	if (eh->h_proto != htons(ETH_P_IP))
		return;

	ih = (struct iphdr *)((char *)eh + ETH_HLEN);
	if (ip_is_fragment(ih))
		return;

	proto = ih->protocol;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return;

	/* Same as tx - compute csum of pseudo header  */
	csum = hwsum +
	       (ih->tot_len - (ih->ihl << 2)) +
	       htons((uint16_t)ih->protocol) +
	       (ih->saddr >> 16) + (ih->saddr & 0xffff) +
	       (ih->daddr >> 16) + (ih->daddr & 0xffff);

	/* Sum up ethernet dest addr, src addr and protocol  */
	ew = (uint16_t *)eh;
	ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

	ehsum = (ehsum & 0xffff) + (ehsum >> 16);
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);

	csum += 0xffff ^ ehsum;

	/* In the next step we also subtract the 1's complement
	   checksum of the trailing ethernet CRC.  */
	cp = (char *)eh + len;	/* points at trailing CRC  */
	if (len & 1) {
		csum += 0xffff ^ (uint16_t)((cp[1] << 8) | cp[0]);
		csum += 0xffff ^ (uint16_t)((cp[3] << 8) | cp[2]);
	} else {
		csum += 0xffff ^ (uint16_t)((cp[0] << 8) | cp[1]);
		csum += 0xffff ^ (uint16_t)((cp[2] << 8) | cp[3]);
	}

	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);

	if (csum == 0xffff)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ioc3_rx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sk_buff *skb, *new_skb;
	struct ioc3 *ioc3 = ip->regs;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	u32 w0, err;

	rxr = ip->rxr;		/* Ring base */
	rx_entry = ip->rx_ci;	/* RX consume index */
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {
		err = be32_to_cpu(rxb->err);		/* It's valid ...  */
		if (err & ERXBUF_GOODPKT) {
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_trim(skb, len);
			skb->protocol = eth_type_trans(skb, dev);

			new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!new_skb) {
				/* Ouch, drop the packet and just recycle it
				   to keep the ring filled.  */
				dev->stats.rx_dropped++;
				new_skb = skb;
				goto next;
			}

			if (likely(dev->features & NETIF_F_RXCSUM))
				ioc3_tcpudp_checksum(skb,
					w0 & ERXBUF_IPCKSUM_MASK, len);

			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;	/* Poison  */

			/* Because we reserve afterwards.  */
			skb_put(new_skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *)new_skb->data;
			skb_reserve(new_skb, RX_OFFSET);

			dev->stats.rx_packets++;	/* Statistics */
			dev->stats.rx_bytes += len;
		} else {
			/* The frame is invalid and the skb never
			   reached the network layer so we can just
			   recycle it.  */
			new_skb = skb;
			dev->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)	/* Statistics */
			dev->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			dev->stats.rx_frame_errors++;
next:
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & 511;		/* Update erpir */

		/* Now go on to the next ring entry.  */
		rx_entry = (rx_entry + 1) & 511;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}
	ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}

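/*
 * TX reclaim below: the chip advances its consumer index in ETCIR as
 * descriptors complete.  A TX descriptor occupies 128 bytes (the 16 kB
 * ring holds 128 of them, per the comment in ioc3_alloc_rings()), so
 * ">> 7" converts the register's byte offset into a ring index and
 * "& 127" wraps it.
 */
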
static inline void ioc3_tx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	unsigned long packets, bytes;
	struct ioc3 *ioc3 = ip->regs;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = ioc3_r_etcir();

	tx_entry = (etcir >> 7) & 127;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_kfree_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & 127;		/* Next */

		etcir = ioc3_r_etcir();			/* More pkts sent?  */
		tx_entry = (etcir >> 7) & 127;
	}

	dev->stats.tx_packets += packets;
	dev->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	if (ip->txqlen < 128)
		netif_wake_queue(dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}

/*
 * Deal with fatal IOC3 errors.  This condition might be caused by
 * hardware or software problems, so we should try to recover more
 * gracefully if it ever happens.  In theory we might be flooded with
 * such error interrupts if something really goes wrong, so we might
 * also consider taking the interface down.
 */
static void ioc3_error(struct net_device *dev, u32 eisr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	unsigned char *iface = dev->name;

	spin_lock(&ip->ioc3_lock);

	if (eisr & EISR_RXOFLO)
		printk(KERN_ERR "%s: RX overflow.\n", iface);
	if (eisr & EISR_RXBUFOFLO)
		printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
	if (eisr & EISR_RXMEMERR)
		printk(KERN_ERR "%s: RX PCI error.\n", iface);
	if (eisr & EISR_RXPARERR)
		printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
	if (eisr & EISR_TXBUFUFLO)
		printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
	if (eisr & EISR_TXMEMERR)
		printk(KERN_ERR "%s: TX PCI error.\n", iface);

	ioc3_stop(ip);
	ioc3_init(dev);
	ioc3_mii_init(ip);

	netif_wake_queue(dev);

	spin_unlock(&ip->ioc3_lock);
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  */
static irqreturn_t ioc3_interrupt(int irq, void *_dev)
{
	struct net_device *dev = (struct net_device *)_dev;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	                    EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	                    EISR_TXEXPLICIT | EISR_TXMEMERR;
	u32 eisr;

	eisr = ioc3_r_eisr() & enabled;

	ioc3_w_eisr(eisr);
	(void) ioc3_r_eisr();				/* Flush */

	if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
	            EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
		ioc3_error(dev, eisr);
	if (eisr & EISR_RXTIMERINT)
		ioc3_rx(dev);
	if (eisr & EISR_TXEXPLICIT)
		ioc3_tx(dev);

	return IRQ_HANDLED;
}

static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	if (ip->mii.full_duplex) {
		ioc3_w_etcsr(ETCSR_FD);
		ip->emcr |= EMCR_DUPLEX;
	} else {
		ioc3_w_etcsr(ETCSR_HD);
		ip->emcr &= ~EMCR_DUPLEX;
	}
	ioc3_w_emcr(ip->emcr);
}

static void ioc3_timer(unsigned long data)
{
	struct ioc3_private *ip = (struct ioc3_private *)data;

	/* Print the link status if it has changed */
	mii_check_media(&ip->mii, 1, 0);
	ioc3_setup_duplex(ip);

	ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10);	/* 1.2s */
	add_timer(&ip->ioc3_timer);
}

/*
 * Try to find a PHY.  There is no apparent relation between the MII
 * addresses in the SGI documentation and what we find in reality, so we
 * simply probe for the PHY.  It seems IOC3 PHYs usually live on address
 * 31.  One of my onboard IOC3s has the special oddity that probing
 * doesn't seem to find it yet the interface seems to work fine; so for
 * now, if probing fails we simply default to PHY 31 instead of bailing
 * out.
 */
static int ioc3_mii_init(struct ioc3_private *ip)
{
	struct net_device *dev = priv_netdev(ip);
	int i, found = 0, res = 0;
	int ioc3_phy_workaround = 1;
	u16 word;

	for (i = 0; i < 32; i++) {
		word = ioc3_mdio_read(dev, i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000) {
			found = 1;
			break;			/* Found a PHY  */
		}
	}

	if (!found) {
		if (ioc3_phy_workaround)
			i = 31;
		else {
			ip->mii.phy_id = -1;
			res = -ENODEV;
			goto out;
		}
	}

	ip->mii.phy_id = i;

out:
	return res;
}

static void ioc3_mii_start(struct ioc3_private *ip)
{
	ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10;	/* 1.2 sec. */
	ip->ioc3_timer.data = (unsigned long)ip;
	ip->ioc3_timer.function = ioc3_timer;
	add_timer(&ip->ioc3_timer);
}

static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	/* Copy the entries between rx_ci and the next multiple of 16 up
	   to the producer end, so the consumer index ends up 16-aligned.  */
	for (i = ip->rx_ci; i & 15; i++) {
		ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
		ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
	}
	ip->rx_pi &= 511;
	ip->rx_ci &= 511;

	for (i = ip->rx_ci; i != ip->rx_pi; i = (i + 1) & 511) {
		struct ioc3_erxbuf *rxb;
		skb = ip->rx_skbs[i];
		rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
		rxb->w0 = 0;
	}
}

static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < 128; i++) {
		skb = ip->tx_skbs[i];
		if (skb) {
			ip->tx_skbs[i] = NULL;
			dev_kfree_skb_any(skb);
		}
		ip->txr[i].cmd = 0;
	}
	ip->tx_pi = 0;
	ip->tx_ci = 0;
}

static void ioc3_free_rings(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int rx_entry, n_entry;

	if (ip->txr) {
		ioc3_clean_tx_ring(ip);
		free_pages((unsigned long)ip->txr, 2);
		ip->txr = NULL;
	}

	if (ip->rxr) {
		n_entry = ip->rx_ci;
		rx_entry = ip->rx_pi;

		while (n_entry != rx_entry) {
			skb = ip->rx_skbs[n_entry];
			if (skb)
				dev_kfree_skb_any(skb);

			n_entry = (n_entry + 1) & 511;
		}
		free_page((unsigned long)ip->rxr);
		ip->rxr = NULL;
	}
}

static void ioc3_alloc_rings(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	int i;

	if (ip->rxr == NULL) {
		/* Allocate and initialize rx ring.  4kb = 512 entries  */
		ip->rxr = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
		rxr = ip->rxr;
		if (!rxr)
			printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");

		/* Now the rx buffers.  The RX ring may be larger but we
		   only allocate RX_BUFFS buffers for now.  Need to tune
		   this for performance and memory later.  */
		for (i = 0; i < RX_BUFFS; i++) {
			struct sk_buff *skb;

			skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!skb) {
				show_free_areas(0);
				continue;
			}

			ip->rx_skbs[i] = skb;

			/* Because we reserve afterwards.  */
			skb_put(skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *)skb->data;
			rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
			skb_reserve(skb, RX_OFFSET);
		}
		ip->rx_ci = 0;
		ip->rx_pi = RX_BUFFS;
	}

	if (ip->txr == NULL) {
		/* Allocate and initialize tx rings.  16kb = 128 bufs.  */
		ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
		if (!ip->txr)
			printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
		ip->tx_pi = 0;
		ip->tx_ci = 0;
	}
}

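/*
 * Note on the ring registers programmed below: they hold byte offsets,
 * not indices.  RX ring entries are 64-bit buffer pointers, hence
 * "index << 3"; TX descriptors are 128 bytes, hence "index << 7".
 * ERPIR_ARM apparently (re)arms the RX timer interrupt - ioc3_rx()
 * sets it again after refilling the ring.
 */
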
static void ioc3_init_rings(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	unsigned long ring;

	ioc3_free_rings(ip);
	ioc3_alloc_rings(dev);

	ioc3_clean_rx_ring(ip);
	ioc3_clean_tx_ring(ip);

	/* Now the rx ring base, consume & produce registers.  */
	ring = ioc3_map(ip->rxr, 0);
	ioc3_w_erbr_h(ring >> 32);
	ioc3_w_erbr_l(ring & 0xffffffff);
	ioc3_w_ercir(ip->rx_ci << 3);
	ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);

	ring = ioc3_map(ip->txr, 0);

	ip->txqlen = 0;					/* nothing queued  */

	/* Now the tx ring base, consume & produce registers.  */
	ioc3_w_etbr_h(ring >> 32);
	ioc3_w_etbr_l(ring & 0xffffffff);
	ioc3_w_etpir(ip->tx_pi << 7);
	ioc3_w_etcir(ip->tx_ci << 7);
	(void) ioc3_r_etcir();				/* Flush */
}

static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	volatile u32 *ssram0 = &ioc3->ssram[0x0000];
	volatile u32 *ssram1 = &ioc3->ssram[0x4000];
	unsigned int pattern = 0x5555;

	/* Assume the larger size SSRAM and enable parity checking */
	ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));

	*ssram0 = pattern;
	*ssram1 = ~pattern & IOC3_SSRAM_DM;

	if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
	    (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		/* set ssram size to 64 KB */
		ip->emcr = EMCR_RAMPAR;
		ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
	} else
		ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
}

static void ioc3_init(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	del_timer_sync(&ip->ioc3_timer);	/* Kill if running  */

	ioc3_w_emcr(EMCR_RST);			/* Reset  */
	(void) ioc3_r_emcr();			/* Flush WB  */
	udelay(4);				/* Give it time ...  */
	ioc3_w_emcr(0);
	(void) ioc3_r_emcr();

	/* Misc registers  */
#ifdef CONFIG_SGI_IP27
	ioc3_w_erbar(PCI64_ATTR_BAR >> 32);	/* Barrier on last store */
#else
	ioc3_w_erbar(0);			/* Let PCI API get it right */
#endif
	(void) ioc3_r_etcdc();			/* Clear on read */
	ioc3_w_ercsr(15);			/* RX low watermark  */
	ioc3_w_ertr(0);				/* Interrupt immediately */
	__ioc3_set_mac_address(dev);
	ioc3_w_ehar_h(ip->ehar_h);
	ioc3_w_ehar_l(ip->ehar_l);
	ioc3_w_ersr(42);			/* XXX should be random */

	ioc3_init_rings(dev);

	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
	            EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
	ioc3_w_emcr(ip->emcr);
	ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	            EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	            EISR_TXEXPLICIT | EISR_TXMEMERR);
	(void) ioc3_r_eier();
}

static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	ioc3_w_emcr(0);				/* Shutup */
	ioc3_w_eier(0);				/* Disable interrupts */
	(void) ioc3_r_eier();			/* Flush */
}

static int ioc3_open(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
		printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);

		return -EAGAIN;
	}

	ip->ehar_h = 0;
	ip->ehar_l = 0;
	ioc3_init(dev);
	ioc3_mii_start(ip);

	netif_start_queue(dev);
	return 0;
}

static int ioc3_close(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	del_timer_sync(&ip->ioc3_timer);

	netif_stop_queue(dev);

	ioc3_stop(ip);
	free_irq(dev->irq, dev);

	ioc3_free_rings(ip);
	return 0;
}

/*
 * MENET cards have four IOC3 chips, which are attached to two sets of
 * PCI slot resources each: the primary connections are on slots
 * 0..3 and the secondaries are on 4..7
 *
 * All four ethernets are brought out to connectors; six serial ports
 * (a pair from each of the first three IOC3s) are brought out to
 * MiniDINs; all other subdevices are left swinging in the wind, leave
 * them disabled.
 */

static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
{
	struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
	int ret = 0;

	if (dev) {
		if (dev->vendor == PCI_VENDOR_ID_SGI &&
		    dev->device == PCI_DEVICE_ID_SGI_IOC3)
			ret = 1;
		pci_dev_put(dev);
	}

	return ret;
}

static int ioc3_is_menet(struct pci_dev *pdev)
{
	return pdev->bus->parent == NULL &&
	       ioc3_adjacent_is_ioc3(pdev, 0) &&
	       ioc3_adjacent_is_ioc3(pdev, 1) &&
	       ioc3_adjacent_is_ioc3(pdev, 2);
}

#ifdef CONFIG_SERIAL_8250
/*
 * Note about serial ports and consoles:
 * For console output, everyone uses the IOC3 UARTA (offset 0x178)
 * connected to the master node (look in ip27_setup_console() and
 * ip27prom_console_write()).
 *
 * For serial (/dev/ttyS0 etc), we cannot have hardcoded serial port
 * addresses on a partitioned machine.  Since we currently use the ioc3
 * serial ports, we use dynamic serial port discovery that the serial.c
 * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
 * boards in pci_boards[]).  Unfortunately, UARTA's pio address is greater
 * than UARTB's, although UARTA on o200s has traditionally been known as
 * port 0.  So, we just use one serial port from each ioc3 (since the
 * serial driver adds addresses to get to higher ports).
 *
 * The first one to do a register_console becomes the preferred console
 * (if there is no kernel command line console= directive).  /dev/console
 * (ie 5, 1) is then "aliased" into the device number returned by the
 * "device" routine referred to in this console structure
 * (ip27prom_console_dev).
 *
 * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
 * around ioc3 oddities in this respect.
 *
 * The IOC3 serials use a 22MHz clock rate with an additional divider
 * which can be programmed in the SCR register if the DLAB bit is set.
 *
 * Register to interrupt zero because we share the interrupt with
 * the serial driver which we don't properly support yet.
 *
 * Can't use UPF_IOREMAP as the whole of IOC3 resources have already
 * been registered.
 */
static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6

	struct uart_8250_port port = {
		.port = {
			.irq		= 0,
			.flags		= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
			.iotype		= UPIO_MEM,
			.regshift	= 0,
			.uartclk	= (22000000 << 1) / COSMISC_CONSTANT,

			.membase	= (unsigned char __iomem *)uart,
			.mapbase	= (unsigned long)uart,
		}
	};
	unsigned char lcr;

	/* Program the SCR divider with DLAB set, then restore LCR.  */
	lcr = uart->iu_lcr;
	uart->iu_lcr = lcr | UART_LCR_DLAB;
	uart->iu_scr = COSMISC_CONSTANT;
	uart->iu_lcr = lcr;
	uart->iu_lcr;		/* Dummy read back to flush the write.  */
	serial8250_register_8250_port(&port);
}

static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
	/*
	 * We need to recognize and treat the fourth MENET serial port
	 * specially: it does not have a SuperIO chip attached to it,
	 * therefore attempting to access it will result in bus errors.
	 * We call something a MENET if PCI slots 0, 1, 2 and 3 of a
	 * master PCI bus all have an IOC3 in them.  This is paranoid,
	 * but we want to avoid blowing up on a shoehorned PCI box that
	 * happens to have 4 IOC3 cards in it, so it's not paranoid
	 * enough ...
	 */
	if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
		return;

	/*
	 * Switch IOC3 to PIO mode.  It probably already was but let's be
	 * paranoid.  The bare register reads flush the posted writes.
	 */
	ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
	ioc3->gpcr_s;
	ioc3->gppr_6 = 0;
	ioc3->gppr_6;
	ioc3->gppr_7 = 0;
	ioc3->gppr_7;
	ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
	ioc3->sscr_a;
	ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
	ioc3->sscr_b;
	/* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC.  */
	ioc3->sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
			   SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
			   SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
			   SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
	ioc3->sio_iec |= SIO_IR_SA_INT;
	ioc3->sscr_a = 0;
	ioc3->sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
			   SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
			   SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
			   SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
	ioc3->sio_iec |= SIO_IR_SB_INT;
	ioc3->sscr_b = 0;

	ioc3_8250_register(&ioc3->sregs.uarta);
	ioc3_8250_register(&ioc3->sregs.uartb);
}
#endif

static const struct net_device_ops ioc3_netdev_ops = {
	.ndo_open		= ioc3_open,
	.ndo_stop		= ioc3_close,
	.ndo_start_xmit		= ioc3_start_xmit,
	.ndo_tx_timeout		= ioc3_timeout,
	.ndo_get_stats		= ioc3_get_stats,
	.ndo_set_rx_mode	= ioc3_set_multicast_list,
	.ndo_do_ioctl		= ioc3_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ioc3_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
};

static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int sw_physid1, sw_physid2;
	struct net_device *dev = NULL;
	struct ioc3_private *ip;
	struct ioc3 *ioc3;
	unsigned long ioc3_base, ioc3_size;
	u32 vendor, model, rev;
	int err, pci_using_dac;

	/* Configure DMA attributes.  */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err < 0) {
			printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
			       "for consistent allocations\n", pci_name(pdev));
			goto out;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR "%s: No usable DMA configuration, "
			       "aborting.\n", pci_name(pdev));
			goto out;
		}
		pci_using_dac = 0;
	}

	if (pci_enable_device(pdev))
		return -ENODEV;

	dev = alloc_etherdev(sizeof(struct ioc3_private));
	if (!dev) {
		err = -ENOMEM;
		goto out_disable;
	}

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	err = pci_request_regions(pdev, "ioc3");
	if (err)
		goto out_free;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ip = netdev_priv(dev);

	dev->irq = pdev->irq;

	ioc3_base = pci_resource_start(pdev, 0);
	ioc3_size = pci_resource_len(pdev, 0);
	ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
	if (!ioc3) {
		printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto out_res;
	}
	ip->regs = ioc3;

#ifdef CONFIG_SERIAL_8250
	ioc3_serial_probe(pdev, ioc3);
#endif

	spin_lock_init(&ip->ioc3_lock);
	init_timer(&ip->ioc3_timer);

	ioc3_stop(ip);
	ioc3_init(dev);

	ip->pdev = pdev;

	ip->mii.phy_id_mask = 0x1f;
	ip->mii.reg_num_mask = 0x1f;
	ip->mii.dev = dev;
	ip->mii.mdio_read = ioc3_mdio_read;
	ip->mii.mdio_write = ioc3_mdio_write;

	ioc3_mii_init(ip);

	if (ip->mii.phy_id == -1) {
		printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_stop;
	}

	ioc3_mii_start(ip);
	ioc3_ssram_disc(ip);
	ioc3_get_eaddr(ip);

	/* The IOC3-specific entries in the device structure.  */
	dev->watchdog_timeo	= 5 * HZ;
	dev->netdev_ops		= &ioc3_netdev_ops;
	dev->ethtool_ops	= &ioc3_ethtool_ops;
	dev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features		= NETIF_F_IP_CSUM;

	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);

	err = register_netdev(dev);
	if (err)
		goto out_stop;

	mii_check_media(&ip->mii, 1, 1);
	ioc3_setup_duplex(ip);

	vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
	model  = (sw_physid2 >> 4) & 0x3f;
	rev    = sw_physid2 & 0xf;
	printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
	       "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
	printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
	       ip->emcr & EMCR_BUFSIZ ? 128 : 64);

	return 0;

out_stop:
	ioc3_stop(ip);
	del_timer_sync(&ip->ioc3_timer);
	ioc3_free_rings(ip);
out_res:
	pci_release_regions(pdev);
out_free:
	free_netdev(dev);
out_disable:
	/*
	 * We should call pci_disable_device(pdev); here if the IOC3 wasn't
	 * such a weird device ...
	 */
out:
	return err;
}

static void ioc3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	unregister_netdev(dev);
	del_timer_sync(&ip->ioc3_timer);

	iounmap(ioc3);
	pci_release_regions(pdev);
	free_netdev(dev);
	/*
	 * We should call pci_disable_device(pdev); here if the IOC3 wasn't
	 * such a weird device ...
	 */
}

static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);

static struct pci_driver ioc3_driver = {
	.name		= "ioc3-eth",
	.id_table	= ioc3_pci_tbl,
	.probe		= ioc3_probe,
	.remove		= ioc3_remove_one,
};

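/*
 * A sketch of the TX checksumming trick used in ioc3_start_xmit()
 * below: the hardware can only insert the plain 1's complement sum of
 * the entire packet at a configurable offset.  TCP/UDP want the sum of
 * pseudo header plus payload only, so the driver preloads the checksum
 * field with a value that cancels the MAC and IP headers out of the
 * hardware's total, once more using the (0xffff ^ x) == -x identity.
 */
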
static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long data;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	unsigned int len;
	struct ioc3_etxd *desc;
	uint32_t w0 = 0;
	int produce;

	/*
	 * IOC3 has fairly simple-minded checksumming hardware which simply
	 * adds up the 1's complement checksum for the entire packet and
	 * inserts it at an offset which can be specified in the descriptor
	 * into the transmit packet.  This means we have to compensate for
	 * the MAC header, which should not be summed, and for the TCP/UDP
	 * pseudo headers manually.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ih = ip_hdr(skb);
		const int proto = ih->protocol;	/* a single byte, no swapping */
		unsigned int csoff;
		uint32_t csum, ehsum;
		uint16_t *eh;

		/* The MAC header.  skb->mac seems the logical approach
		   to find the MAC header - except it's a NULL pointer ...  */
		eh = (uint16_t *)skb->data;

		/* Sum up dest addr, src addr and protocol  */
		ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];

		/* Fold ehsum.  Can't use csum_fold(), which also negates.  */
		ehsum = (ehsum & 0xffff) + (ehsum >> 16);
		ehsum = (ehsum & 0xffff) + (ehsum >> 16);

		/* Skip the IP header; its sum is always zero and was
		   already filled in by ip_output.c  */
		csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
		                          ih->tot_len - (ih->ihl << 2),
		                          proto, 0xffff ^ ehsum);

		csum = (csum & 0xffff) + (csum >> 16);	/* Fold again */
		csum = (csum & 0xffff) + (csum >> 16);

		csoff = ETH_HLEN + (ih->ihl << 2);
		if (proto == IPPROTO_UDP) {
			csoff += offsetof(struct udphdr, check);
			udp_hdr(skb)->check = csum;
		}
		if (proto == IPPROTO_TCP) {
			csoff += offsetof(struct tcphdr, check);
			tcp_hdr(skb)->check = csum;
		}

		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
	}

	spin_lock_irq(&ip->ioc3_lock);

	data = (unsigned long)skb->data;
	len = skb->len;

	produce = ip->tx_pi;
	desc = &ip->txr[produce];

	if (len <= 104) {
		/* Short packet, let's copy it directly into the ring.  */
		skb_copy_from_linear_data(skb, desc->data, skb->len);
		if (len < ETH_ZLEN) {
			/* Very short packet, pad with zeros at the end.  */
			memset(desc->data + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
		desc->bufcnt = cpu_to_be32(len);
	} else if ((data ^ (data + len - 1)) & 0x4000) {
		/* Buffer straddles a 16 kB boundary - split it in two.  */
		unsigned long b2 = (data | 0x3fffUL) + 1UL;
		unsigned long s1 = b2 - data;
		unsigned long s2 = data + len - b2;

		desc->cmd    = cpu_to_be32(len | ETXD_INTWHENDONE |
		                           ETXD_B1V | ETXD_B2V | w0);
		desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
		                           (s2 << ETXD_B2CNT_SHIFT));
		desc->p1     = cpu_to_be64(ioc3_map(skb->data, 1));
		desc->p2     = cpu_to_be64(ioc3_map((void *)b2, 1));
	} else {
		/* Normal sized packet that doesn't cross a page boundary.  */
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
		desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
		desc->p1     = cpu_to_be64(ioc3_map(skb->data, 1));
	}

	BARRIER();

	ip->tx_skbs[produce] = skb;			/* Remember skb */
	produce = (produce + 1) & 127;
	ip->tx_pi = produce;
	ioc3_w_etpir(produce << 7);			/* Fire ... */

	ip->txqlen++;

	if (ip->txqlen >= 127)
		netif_stop_queue(dev);

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;
}

static void ioc3_timeout(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

	spin_lock_irq(&ip->ioc3_lock);

	ioc3_stop(ip);
	ioc3_init(dev);
	ioc3_mii_init(ip);
	ioc3_mii_start(ip);

	spin_unlock_irq(&ip->ioc3_lock);

	netif_wake_queue(dev);
}

/*
 * Given a multicast ethernet address, this routine calculates the
 * address's bit index in the logical address filter mask.
 */
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
	unsigned int temp = 0;
	u32 crc;
	int bits;

	crc = ether_crc_le(ETH_ALEN, addr);

	crc &= 0x3f;	/* bit reverse lowest 6 bits for hash index */
	for (bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= (crc & 0x1);
		crc >>= 1;
	}

	return temp;
}

static void ioc3_get_drvinfo(struct net_device *dev,
	struct ethtool_drvinfo *info)
{
	struct ioc3_private *ip = netdev_priv(dev);

	strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
	strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}

static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_gset(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_sset(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static int ioc3_nway_reset(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_nway_restart(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static u32 ioc3_get_link(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_link_ok(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static const struct ethtool_ops ioc3_ethtool_ops = {
	.get_drvinfo		= ioc3_get_drvinfo,
	.get_settings		= ioc3_get_settings,
	.set_settings		= ioc3_set_settings,
	.nway_reset		= ioc3_nway_reset,
	.get_link		= ioc3_get_link,
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	u64 ehar = 0;

	netif_stop_queue(dev);				/* Lock out others. */

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous.  */
		ip->emcr |= EMCR_PROMISC;
		ioc3_w_emcr(ip->emcr);
		(void) ioc3_r_emcr();
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		ioc3_w_emcr(ip->emcr);			/* Clear promiscuous. */
		(void) ioc3_r_emcr();

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 64)) {
			/* Too many for hashing to make sense or we want all
			   multicast packets anyway, so skip computing all the
			   hashes and just accept all packets.  */
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			netdev_for_each_mc_addr(ha, dev) {
				ehar |= (1UL << ioc3_hash(ha->addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		ioc3_w_ehar_h(ip->ehar_h);
		ioc3_w_ehar_l(ip->ehar_l);
	}

	netif_wake_queue(dev);			/* Let us get going again. */
}

module_pci_driver(ioc3_driver);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");