/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE	phyDevice;
	const u32			phyIdOUI;
	const u16			phyIdModel;
	const char			*name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};


/*
 * Caller must take hw_lock.
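 * Spins on the hardware semaphore, retrying once per second for up
 * to three seconds before giving up.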
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (i < 10) {
		if (i)
			ssleep(1);

		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
	}

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
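	/* Give the EEPROM interface time to settle after the flushing read. */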
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, spir,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
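 * Clocks dataBits bits back from the EEPROM, most significant bit first.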
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy. Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);

}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
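 * The MAC config registers treat the upper 16 bits as a write-enable mask
 * for the lower 16, so (bit | (bit << 16)) sets a bit while (bit << 16)
 * alone clears it.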
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
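 * Returns 1 when this function owns (is link master for) the physical port.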
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 2 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 2 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
			&port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)			\

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	u32 reg;
	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}


				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
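 * Advances the small buffer queue producer index, one entry for every
 * eight buffers released, and writes the new index to the chip.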
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
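	/* Return this slot to the count of available transmit entries. */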
	atomic_inc(&qdev->tx_count);
}

static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */

	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
*/ 2076 pci_unmap_single(qdev->pdev, 2077 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2078 dma_unmap_len(lrg_buf_cb2, maplen), 2079 PCI_DMA_FROMDEVICE); 2080 prefetch(skb2->data); 2081 2082 skb_checksum_none_assert(skb2); 2083 if (qdev->device_id == QL3022_DEVICE_ID) { 2084 /* 2085 * Copy the ethhdr from first buffer to second. This 2086 * is necessary for 3022 IP completions. 2087 */ 2088 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2089 skb_push(skb2, size), size); 2090 } else { 2091 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2092 if (checksum & 2093 (IB_IP_IOCB_RSP_3032_ICE | 2094 IB_IP_IOCB_RSP_3032_CE)) { 2095 netdev_err(ndev, 2096 "%s: Bad checksum for this %s packet, checksum = %x\n", 2097 __func__, 2098 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2099 "TCP" : "UDP"), checksum); 2100 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2101 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2102 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2103 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2104 } 2105 } 2106 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2107 2108 netif_receive_skb(skb2); 2109 ndev->stats.rx_packets++; 2110 ndev->stats.rx_bytes += length; 2111 lrg_buf_cb2->skb = NULL; 2112 2113 if (qdev->device_id == QL3022_DEVICE_ID) 2114 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2115 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2116 } 2117 2118 static int ql_tx_rx_clean(struct ql3_adapter *qdev, 2119 int *tx_cleaned, int *rx_cleaned, int work_to_do) 2120 { 2121 struct net_rsp_iocb *net_rsp; 2122 struct net_device *ndev = qdev->ndev; 2123 int work_done = 0; 2124 2125 /* While there are entries in the completion queue. */ 2126 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2127 qdev->rsp_consumer_index) && (work_done < work_to_do)) { 2128 2129 net_rsp = qdev->rsp_current; 2130 rmb(); 2131 /* 2132 * Fix 4032 chip's undocumented "feature" where bit-8 is set 2133 * if the inbound completion is for a VLAN. 
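 * Masking the opcode with 0x7f below clears that bit so the switch
 * statement sees the base opcode value.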
2134 */ 2135 if (qdev->device_id == QL3032_DEVICE_ID) 2136 net_rsp->opcode &= 0x7f; 2137 switch (net_rsp->opcode) { 2138 2139 case OPCODE_OB_MAC_IOCB_FN0: 2140 case OPCODE_OB_MAC_IOCB_FN2: 2141 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) 2142 net_rsp); 2143 (*tx_cleaned)++; 2144 break; 2145 2146 case OPCODE_IB_MAC_IOCB: 2147 case OPCODE_IB_3032_MAC_IOCB: 2148 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 2149 net_rsp); 2150 (*rx_cleaned)++; 2151 break; 2152 2153 case OPCODE_IB_IP_IOCB: 2154 case OPCODE_IB_3032_IP_IOCB: 2155 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 2156 net_rsp); 2157 (*rx_cleaned)++; 2158 break; 2159 default: { 2160 u32 *tmp = (u32 *)net_rsp; 2161 netdev_err(ndev, 2162 "Hit default case, not handled!\n" 2163 " dropping the packet, opcode = %x\n" 2164 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2165 net_rsp->opcode, 2166 (unsigned long int)tmp[0], 2167 (unsigned long int)tmp[1], 2168 (unsigned long int)tmp[2], 2169 (unsigned long int)tmp[3]); 2170 } 2171 } 2172 2173 qdev->rsp_consumer_index++; 2174 2175 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { 2176 qdev->rsp_consumer_index = 0; 2177 qdev->rsp_current = qdev->rsp_q_virt_addr; 2178 } else { 2179 qdev->rsp_current++; 2180 } 2181 2182 work_done = *tx_cleaned + *rx_cleaned; 2183 } 2184 2185 return work_done; 2186 } 2187 2188 static int ql_poll(struct napi_struct *napi, int budget) 2189 { 2190 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2191 int rx_cleaned = 0, tx_cleaned = 0; 2192 unsigned long hw_flags; 2193 struct ql3xxx_port_registers __iomem *port_regs = 2194 qdev->mem_map_registers; 2195 2196 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); 2197 2198 if (tx_cleaned + rx_cleaned != budget) { 2199 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2200 __napi_complete(napi); 2201 ql_update_small_bufq_prod_index(qdev); 2202 ql_update_lrg_bufq_prod_index(qdev); 2203 writel(qdev->rsp_consumer_index, 2204 &port_regs->CommonRegs.rspQConsumerIndex); 2205 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 2206 2207 ql_enable_interrupts(qdev); 2208 } 2209 return tx_cleaned + rx_cleaned; 2210 } 2211 2212 static irqreturn_t ql3xxx_isr(int irq, void *dev_id) 2213 { 2214 2215 struct net_device *ndev = dev_id; 2216 struct ql3_adapter *qdev = netdev_priv(ndev); 2217 struct ql3xxx_port_registers __iomem *port_regs = 2218 qdev->mem_map_registers; 2219 u32 value; 2220 int handled = 1; 2221 u32 var; 2222 2223 value = ql_read_common_reg_l(qdev, 2224 &port_regs->CommonRegs.ispControlStatus); 2225 2226 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2227 spin_lock(&qdev->adapter_lock); 2228 netif_stop_queue(qdev->ndev); 2229 netif_carrier_off(qdev->ndev); 2230 ql_disable_interrupts(qdev); 2231 qdev->port_link_state = LS_DOWN; 2232 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; 2233 2234 if (value & ISP_CONTROL_FE) { 2235 /* 2236 * Chip Fatal Error. 2237 */ 2238 var = 2239 ql_read_page0_reg_l(qdev, 2240 &port_regs->PortFatalErrStatus); 2241 netdev_warn(ndev, 2242 "Resetting chip. PortFatalErrStatus register = 0x%x\n", 2243 var); 2244 set_bit(QL_RESET_START, &qdev->flags) ; 2245 } else { 2246 /* 2247 * Soft Reset Requested. 2248 */ 2249 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; 2250 netdev_err(ndev, 2251 "Another function issued a reset to the chip. 
ISR value = %x\n", 2252 value); 2253 } 2254 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2255 spin_unlock(&qdev->adapter_lock); 2256 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2257 ql_disable_interrupts(qdev); 2258 if (likely(napi_schedule_prep(&qdev->napi))) 2259 __napi_schedule(&qdev->napi); 2260 } else 2261 return IRQ_NONE; 2262 2263 return IRQ_RETVAL(handled); 2264 } 2265 2266 /* 2267 * Get the total number of segments needed for the given number of fragments. 2268 * This is necessary because outbound address lists (OAL) will be used when 2269 * more than two frags are given. Each address list has 5 addr/len pairs. 2270 * The 5th pair in each OAL is used to point to the next OAL if more frags 2271 * are coming. That is why the frags:segment count ratio is not linear. 2272 */ 2273 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2274 { 2275 if (qdev->device_id == QL3022_DEVICE_ID) 2276 return 1; 2277 2278 if (frags <= 2) 2279 return frags + 1; 2280 else if (frags <= 6) 2281 return frags + 2; 2282 else if (frags <= 10) 2283 return frags + 3; 2284 else if (frags <= 14) 2285 return frags + 4; 2286 else if (frags <= 18) 2287 return frags + 5; 2288 return -1; 2289 } 2290 2291 static void ql_hw_csum_setup(const struct sk_buff *skb, 2292 struct ob_mac_iocb_req *mac_iocb_ptr) 2293 { 2294 const struct iphdr *ip = ip_hdr(skb); 2295 2296 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2297 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2298 2299 if (ip->protocol == IPPROTO_TCP) { 2300 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2301 OB_3032MAC_IOCB_REQ_IC; 2302 } else { 2303 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2304 OB_3032MAC_IOCB_REQ_IC; 2305 } 2306 2307 } 2308 2309 /* 2310 * Map the buffers for this transmit. 2311 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2312 */ 2313 static int ql_send_map(struct ql3_adapter *qdev, 2314 struct ob_mac_iocb_req *mac_iocb_ptr, 2315 struct ql_tx_buf_cb *tx_cb, 2316 struct sk_buff *skb) 2317 { 2318 struct oal *oal; 2319 struct oal_entry *oal_entry; 2320 int len = skb_headlen(skb); 2321 dma_addr_t map; 2322 int err; 2323 int completed_segs, i; 2324 int seg_cnt, seg = 0; 2325 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2326 2327 seg_cnt = tx_cb->seg_count; 2328 /* 2329 * Map the skb buffer first. 2330 */ 2331 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2332 2333 err = pci_dma_mapping_error(qdev->pdev, map); 2334 if (err) { 2335 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2336 err); 2337 2338 return NETDEV_TX_BUSY; 2339 } 2340 2341 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2342 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2343 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2344 oal_entry->len = cpu_to_le32(len); 2345 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2346 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2347 seg++; 2348 2349 if (seg_cnt == 1) { 2350 /* Terminate the last segment. */ 2351 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2352 return NETDEV_TX_OK; 2353 } 2354 oal = tx_cb->oal; 2355 for (completed_segs = 0; 2356 completed_segs < frag_cnt; 2357 completed_segs++, seg++) { 2358 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2359 oal_entry++; 2360 /* 2361 * Check for continuation requirements. 2362 * It's strange but necessary. 2363 * Continuation entry points to outbound address list. 
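 * For example, with seg_cnt == 8 the entry written at seg == 2 is not a
 * data buffer but a pointer to the first OAL, and the remaining
 * fragments are described by the five entries inside that OAL.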
2364 */ 2365 if ((seg == 2 && seg_cnt > 3) || 2366 (seg == 7 && seg_cnt > 8) || 2367 (seg == 12 && seg_cnt > 13) || 2368 (seg == 17 && seg_cnt > 18)) { 2369 map = pci_map_single(qdev->pdev, oal, 2370 sizeof(struct oal), 2371 PCI_DMA_TODEVICE); 2372 2373 err = pci_dma_mapping_error(qdev->pdev, map); 2374 if (err) { 2375 netdev_err(qdev->ndev, 2376 "PCI mapping outbound address list with error: %d\n", 2377 err); 2378 goto map_error; 2379 } 2380 2381 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2382 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2383 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2384 OAL_CONT_ENTRY); 2385 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2386 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2387 sizeof(struct oal)); 2388 oal_entry = (struct oal_entry *)oal; 2389 oal++; 2390 seg++; 2391 } 2392 2393 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2394 DMA_TO_DEVICE); 2395 2396 err = dma_mapping_error(&qdev->pdev->dev, map); 2397 if (err) { 2398 netdev_err(qdev->ndev, 2399 "PCI mapping frags failed with error: %d\n", 2400 err); 2401 goto map_error; 2402 } 2403 2404 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2405 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2406 oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2407 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2408 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2409 } 2410 /* Terminate the last segment. */ 2411 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2412 return NETDEV_TX_OK; 2413 2414 map_error: 2415 /* A PCI mapping failed and now we will need to back out 2416 * We need to traverse through the oal's and associated pages which 2417 * have been mapped and now we must unmap them to clean up properly 2418 */ 2419 2420 seg = 1; 2421 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2422 oal = tx_cb->oal; 2423 for (i = 0; i < completed_segs; i++, seg++) { 2424 oal_entry++; 2425 2426 /* 2427 * Check for continuation requirements. 2428 * It's strange but necessary. 2429 */ 2430 2431 if ((seg == 2 && seg_cnt > 3) || 2432 (seg == 7 && seg_cnt > 8) || 2433 (seg == 12 && seg_cnt > 13) || 2434 (seg == 17 && seg_cnt > 18)) { 2435 pci_unmap_single(qdev->pdev, 2436 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2437 dma_unmap_len(&tx_cb->map[seg], maplen), 2438 PCI_DMA_TODEVICE); 2439 oal++; 2440 seg++; 2441 } 2442 2443 pci_unmap_page(qdev->pdev, 2444 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2445 dma_unmap_len(&tx_cb->map[seg], maplen), 2446 PCI_DMA_TODEVICE); 2447 } 2448 2449 pci_unmap_single(qdev->pdev, 2450 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2451 dma_unmap_addr(&tx_cb->map[0], maplen), 2452 PCI_DMA_TODEVICE); 2453 2454 return NETDEV_TX_BUSY; 2455 2456 } 2457 2458 /* 2459 * The difference between 3022 and 3032 sends: 2460 * 3022 only supports a simple single segment transmission. 2461 * 3032 supports checksumming and scatter/gather lists (fragments). 2462 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2463 * in the IOCB plus a chain of outbound address lists (OAL) that 2464 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2465 * will be used to point to an OAL when more ALP entries are required. 2466 * The IOCB is always the top of the chain followed by one or more 2467 * OALs (when necessary). 
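 * For example, a 6-fragment skb on the 3032 maps to 8 segments (see
 * ql_get_seg_count()): the linear header, fragment 0 and a continuation
 * ALP in the IOCB, plus fragments 1-5 in a single OAL.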
2468 */ 2469 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2470 struct net_device *ndev) 2471 { 2472 struct ql3_adapter *qdev = netdev_priv(ndev); 2473 struct ql3xxx_port_registers __iomem *port_regs = 2474 qdev->mem_map_registers; 2475 struct ql_tx_buf_cb *tx_cb; 2476 u32 tot_len = skb->len; 2477 struct ob_mac_iocb_req *mac_iocb_ptr; 2478 2479 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2480 return NETDEV_TX_BUSY; 2481 2482 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2483 tx_cb->seg_count = ql_get_seg_count(qdev, 2484 skb_shinfo(skb)->nr_frags); 2485 if (tx_cb->seg_count == -1) { 2486 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2487 return NETDEV_TX_OK; 2488 } 2489 2490 mac_iocb_ptr = tx_cb->queue_entry; 2491 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2492 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2493 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2494 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2495 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2496 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2497 tx_cb->skb = skb; 2498 if (qdev->device_id == QL3032_DEVICE_ID && 2499 skb->ip_summed == CHECKSUM_PARTIAL) 2500 ql_hw_csum_setup(skb, mac_iocb_ptr); 2501 2502 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2503 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2504 return NETDEV_TX_BUSY; 2505 } 2506 2507 wmb(); 2508 qdev->req_producer_index++; 2509 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2510 qdev->req_producer_index = 0; 2511 wmb(); 2512 ql_write_common_reg_l(qdev, 2513 &port_regs->CommonRegs.reqQProducerIndex, 2514 qdev->req_producer_index); 2515 2516 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2517 "tx queued, slot %d, len %d\n", 2518 qdev->req_producer_index, skb->len); 2519 2520 atomic_dec(&qdev->tx_count); 2521 return NETDEV_TX_OK; 2522 } 2523 2524 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2525 { 2526 qdev->req_q_size = 2527 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2528 2529 qdev->req_q_virt_addr = 2530 pci_alloc_consistent(qdev->pdev, 2531 (size_t) qdev->req_q_size, 2532 &qdev->req_q_phy_addr); 2533 2534 if ((qdev->req_q_virt_addr == NULL) || 2535 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2536 netdev_err(qdev->ndev, "reqQ failed\n"); 2537 return -ENOMEM; 2538 } 2539 2540 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2541 2542 qdev->rsp_q_virt_addr = 2543 pci_alloc_consistent(qdev->pdev, 2544 (size_t) qdev->rsp_q_size, 2545 &qdev->rsp_q_phy_addr); 2546 2547 if ((qdev->rsp_q_virt_addr == NULL) || 2548 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2549 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2550 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2551 qdev->req_q_virt_addr, 2552 qdev->req_q_phy_addr); 2553 return -ENOMEM; 2554 } 2555 2556 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2557 2558 return 0; 2559 } 2560 2561 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2562 { 2563 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2564 netdev_info(qdev->ndev, "Already done\n"); 2565 return; 2566 } 2567 2568 pci_free_consistent(qdev->pdev, 2569 qdev->req_q_size, 2570 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2571 2572 qdev->req_q_virt_addr = NULL; 2573 2574 pci_free_consistent(qdev->pdev, 2575 qdev->rsp_q_size, 2576 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2577 2578 qdev->rsp_q_virt_addr = NULL; 2579 2580 
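/* Mark the queues as freed so a repeated call becomes a no-op. */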
clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2581 } 2582
2583 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2584 {
2585 /* Create Large Buffer Queue */
2586 qdev->lrg_buf_q_size =
2587 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2588 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2589 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2590 else 2591 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2592
2593 qdev->lrg_buf =
2594 kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
2595 GFP_KERNEL);
2596 if (qdev->lrg_buf == NULL) {
2597 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2598 return -ENOMEM; 2599 } 2600
2601 qdev->lrg_buf_q_alloc_virt_addr =
2602 pci_alloc_consistent(qdev->pdev,
2603 qdev->lrg_buf_q_alloc_size,
2604 &qdev->lrg_buf_q_alloc_phy_addr); 2605
2606 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2607 netdev_err(qdev->ndev, "lBufQ failed\n");
2608 return -ENOMEM; 2609 }
2610 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2611 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2612
2613 /* Create Small Buffer Queue */
2614 qdev->small_buf_q_size =
2615 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2616 if (qdev->small_buf_q_size < PAGE_SIZE)
2617 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2618 else 2619 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2620
2621 qdev->small_buf_q_alloc_virt_addr =
2622 pci_alloc_consistent(qdev->pdev,
2623 qdev->small_buf_q_alloc_size,
2624 &qdev->small_buf_q_alloc_phy_addr); 2625
2626 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2627 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2628 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2629 qdev->lrg_buf_q_alloc_virt_addr,
2630 qdev->lrg_buf_q_alloc_phy_addr);
2631 return -ENOMEM; 2632 } 2633
2634 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2635 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2636 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2637 return 0; 2638 } 2639
2640 static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2641 {
2642 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2643 netdev_info(qdev->ndev, "Already done\n");
2644 return; 2645 }
2646 kfree(qdev->lrg_buf);
2647 pci_free_consistent(qdev->pdev,
2648 qdev->lrg_buf_q_alloc_size,
2649 qdev->lrg_buf_q_alloc_virt_addr,
2650 qdev->lrg_buf_q_alloc_phy_addr); 2651
2652 qdev->lrg_buf_q_virt_addr = NULL; 2653
2654 pci_free_consistent(qdev->pdev,
2655 qdev->small_buf_q_alloc_size,
2656 qdev->small_buf_q_alloc_virt_addr,
2657 qdev->small_buf_q_alloc_phy_addr); 2658
2659 qdev->small_buf_q_virt_addr = NULL; 2660
2661 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2662 } 2663
2664 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2665 {
2666 int i;
2667 struct bufq_addr_element *small_buf_q_entry; 2668
2669 /* Currently we allocate one chunk of memory and use it for the small buffers */
2670 qdev->small_buf_total_size =
2671 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2672 QL_SMALL_BUFFER_SIZE); 2673
2674 qdev->small_buf_virt_addr =
2675 pci_alloc_consistent(qdev->pdev,
2676 qdev->small_buf_total_size,
2677 &qdev->small_buf_phy_addr); 2678
2679 if (qdev->small_buf_virt_addr == NULL) {
2680 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2681 return -ENOMEM; 2682 } 2683
2684 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2685 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2686
2687 small_buf_q_entry =
qdev->small_buf_q_virt_addr; 2688 2689 /* Initialize the small buffer queue. */ 2690 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2691 small_buf_q_entry->addr_high = 2692 cpu_to_le32(qdev->small_buf_phy_addr_high); 2693 small_buf_q_entry->addr_low = 2694 cpu_to_le32(qdev->small_buf_phy_addr_low + 2695 (i * QL_SMALL_BUFFER_SIZE)); 2696 small_buf_q_entry++; 2697 } 2698 qdev->small_buf_index = 0; 2699 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2700 return 0; 2701 } 2702 2703 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2704 { 2705 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2706 netdev_info(qdev->ndev, "Already done\n"); 2707 return; 2708 } 2709 if (qdev->small_buf_virt_addr != NULL) { 2710 pci_free_consistent(qdev->pdev, 2711 qdev->small_buf_total_size, 2712 qdev->small_buf_virt_addr, 2713 qdev->small_buf_phy_addr); 2714 2715 qdev->small_buf_virt_addr = NULL; 2716 } 2717 } 2718 2719 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2720 { 2721 int i = 0; 2722 struct ql_rcv_buf_cb *lrg_buf_cb; 2723 2724 for (i = 0; i < qdev->num_large_buffers; i++) { 2725 lrg_buf_cb = &qdev->lrg_buf[i]; 2726 if (lrg_buf_cb->skb) { 2727 dev_kfree_skb(lrg_buf_cb->skb); 2728 pci_unmap_single(qdev->pdev, 2729 dma_unmap_addr(lrg_buf_cb, mapaddr), 2730 dma_unmap_len(lrg_buf_cb, maplen), 2731 PCI_DMA_FROMDEVICE); 2732 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2733 } else { 2734 break; 2735 } 2736 } 2737 } 2738 2739 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2740 { 2741 int i; 2742 struct ql_rcv_buf_cb *lrg_buf_cb; 2743 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2744 2745 for (i = 0; i < qdev->num_large_buffers; i++) { 2746 lrg_buf_cb = &qdev->lrg_buf[i]; 2747 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2748 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2749 buf_addr_ele++; 2750 } 2751 qdev->lrg_buf_index = 0; 2752 qdev->lrg_buf_skb_check = 0; 2753 } 2754 2755 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2756 { 2757 int i; 2758 struct ql_rcv_buf_cb *lrg_buf_cb; 2759 struct sk_buff *skb; 2760 dma_addr_t map; 2761 int err; 2762 2763 for (i = 0; i < qdev->num_large_buffers; i++) { 2764 skb = netdev_alloc_skb(qdev->ndev, 2765 qdev->lrg_buffer_len); 2766 if (unlikely(!skb)) { 2767 /* Better luck next round */ 2768 netdev_err(qdev->ndev, 2769 "large buff alloc failed for %d bytes at index %d\n", 2770 qdev->lrg_buffer_len * 2, i); 2771 ql_free_large_buffers(qdev); 2772 return -ENOMEM; 2773 } else { 2774 2775 lrg_buf_cb = &qdev->lrg_buf[i]; 2776 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2777 lrg_buf_cb->index = i; 2778 lrg_buf_cb->skb = skb; 2779 /* 2780 * We save some space to copy the ethhdr from first 2781 * buffer 2782 */ 2783 skb_reserve(skb, QL_HEADER_SPACE); 2784 map = pci_map_single(qdev->pdev, 2785 skb->data, 2786 qdev->lrg_buffer_len - 2787 QL_HEADER_SPACE, 2788 PCI_DMA_FROMDEVICE); 2789 2790 err = pci_dma_mapping_error(qdev->pdev, map); 2791 if (err) { 2792 netdev_err(qdev->ndev, 2793 "PCI mapping failed with error: %d\n", 2794 err); 2795 ql_free_large_buffers(qdev); 2796 return -ENOMEM; 2797 } 2798 2799 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2800 dma_unmap_len_set(lrg_buf_cb, maplen, 2801 qdev->lrg_buffer_len - 2802 QL_HEADER_SPACE); 2803 lrg_buf_cb->buf_phy_addr_low = 2804 cpu_to_le32(LS_64BITS(map)); 2805 lrg_buf_cb->buf_phy_addr_high = 2806 cpu_to_le32(MS_64BITS(map)); 2807 } 2808 } 2809 return 0; 2810 } 2811 2812 static void 
ql_free_send_free_list(struct ql3_adapter *qdev) 2813 { 2814 struct ql_tx_buf_cb *tx_cb; 2815 int i; 2816 2817 tx_cb = &qdev->tx_buf[0]; 2818 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2819 kfree(tx_cb->oal); 2820 tx_cb->oal = NULL; 2821 tx_cb++; 2822 } 2823 } 2824 2825 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2826 { 2827 struct ql_tx_buf_cb *tx_cb; 2828 int i; 2829 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2830 2831 /* Create free list of transmit buffers */ 2832 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2833 2834 tx_cb = &qdev->tx_buf[i]; 2835 tx_cb->skb = NULL; 2836 tx_cb->queue_entry = req_q_curr; 2837 req_q_curr++; 2838 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2839 if (tx_cb->oal == NULL) 2840 return -1; 2841 } 2842 return 0; 2843 } 2844 2845 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2846 { 2847 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2848 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2849 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2850 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2851 /* 2852 * Bigger buffers, so less of them. 2853 */ 2854 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2855 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2856 } else { 2857 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", 2858 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2859 return -ENOMEM; 2860 } 2861 qdev->num_large_buffers = 2862 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2863 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2864 qdev->max_frame_size = 2865 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2866 2867 /* 2868 * First allocate a page of shared memory and use it for shadow 2869 * locations of Network Request Queue Consumer Address Register and 2870 * Network Completion Queue Producer Index Register 2871 */ 2872 qdev->shadow_reg_virt_addr = 2873 pci_alloc_consistent(qdev->pdev, 2874 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2875 2876 if (qdev->shadow_reg_virt_addr != NULL) { 2877 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2878 qdev->req_consumer_index_phy_addr_high = 2879 MS_64BITS(qdev->shadow_reg_phy_addr); 2880 qdev->req_consumer_index_phy_addr_low = 2881 LS_64BITS(qdev->shadow_reg_phy_addr); 2882 2883 qdev->prsp_producer_index = 2884 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2885 qdev->rsp_producer_index_phy_addr_high = 2886 qdev->req_consumer_index_phy_addr_high; 2887 qdev->rsp_producer_index_phy_addr_low = 2888 qdev->req_consumer_index_phy_addr_low + 8; 2889 } else { 2890 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2891 return -ENOMEM; 2892 } 2893 2894 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2895 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2896 goto err_req_rsp; 2897 } 2898 2899 if (ql_alloc_buffer_queues(qdev) != 0) { 2900 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2901 goto err_buffer_queues; 2902 } 2903 2904 if (ql_alloc_small_buffers(qdev) != 0) { 2905 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2906 goto err_small_buffers; 2907 } 2908 2909 if (ql_alloc_large_buffers(qdev) != 0) { 2910 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2911 goto err_small_buffers; 2912 } 2913 2914 /* Initialize the large buffer queue. 
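The buffers themselves were allocated and DMA-mapped in
ql_alloc_large_buffers() above.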
*/ 2915 ql_init_large_buffers(qdev); 2916 if (ql_create_send_free_list(qdev)) 2917 goto err_free_list; 2918 2919 qdev->rsp_current = qdev->rsp_q_virt_addr; 2920 2921 return 0; 2922 err_free_list: 2923 ql_free_send_free_list(qdev); 2924 err_small_buffers: 2925 ql_free_buffer_queues(qdev); 2926 err_buffer_queues: 2927 ql_free_net_req_rsp_queues(qdev); 2928 err_req_rsp: 2929 pci_free_consistent(qdev->pdev, 2930 PAGE_SIZE, 2931 qdev->shadow_reg_virt_addr, 2932 qdev->shadow_reg_phy_addr); 2933 2934 return -ENOMEM; 2935 } 2936 2937 static void ql_free_mem_resources(struct ql3_adapter *qdev) 2938 { 2939 ql_free_send_free_list(qdev); 2940 ql_free_large_buffers(qdev); 2941 ql_free_small_buffers(qdev); 2942 ql_free_buffer_queues(qdev); 2943 ql_free_net_req_rsp_queues(qdev); 2944 if (qdev->shadow_reg_virt_addr != NULL) { 2945 pci_free_consistent(qdev->pdev, 2946 PAGE_SIZE, 2947 qdev->shadow_reg_virt_addr, 2948 qdev->shadow_reg_phy_addr); 2949 qdev->shadow_reg_virt_addr = NULL; 2950 } 2951 } 2952 2953 static int ql_init_misc_registers(struct ql3_adapter *qdev) 2954 { 2955 struct ql3xxx_local_ram_registers __iomem *local_ram = 2956 (void __iomem *)qdev->mem_map_registers; 2957 2958 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2959 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2960 2) << 4)) 2961 return -1; 2962 2963 ql_write_page2_reg(qdev, 2964 &local_ram->bufletSize, qdev->nvram_data.bufletSize); 2965 2966 ql_write_page2_reg(qdev, 2967 &local_ram->maxBufletCount, 2968 qdev->nvram_data.bufletCount); 2969 2970 ql_write_page2_reg(qdev, 2971 &local_ram->freeBufletThresholdLow, 2972 (qdev->nvram_data.tcpWindowThreshold25 << 16) | 2973 (qdev->nvram_data.tcpWindowThreshold0)); 2974 2975 ql_write_page2_reg(qdev, 2976 &local_ram->freeBufletThresholdHigh, 2977 qdev->nvram_data.tcpWindowThreshold50); 2978 2979 ql_write_page2_reg(qdev, 2980 &local_ram->ipHashTableBase, 2981 (qdev->nvram_data.ipHashTableBaseHi << 16) | 2982 qdev->nvram_data.ipHashTableBaseLo); 2983 ql_write_page2_reg(qdev, 2984 &local_ram->ipHashTableCount, 2985 qdev->nvram_data.ipHashTableSize); 2986 ql_write_page2_reg(qdev, 2987 &local_ram->tcpHashTableBase, 2988 (qdev->nvram_data.tcpHashTableBaseHi << 16) | 2989 qdev->nvram_data.tcpHashTableBaseLo); 2990 ql_write_page2_reg(qdev, 2991 &local_ram->tcpHashTableCount, 2992 qdev->nvram_data.tcpHashTableSize); 2993 ql_write_page2_reg(qdev, 2994 &local_ram->ncbBase, 2995 (qdev->nvram_data.ncbTableBaseHi << 16) | 2996 qdev->nvram_data.ncbTableBaseLo); 2997 ql_write_page2_reg(qdev, 2998 &local_ram->maxNcbCount, 2999 qdev->nvram_data.ncbTableSize); 3000 ql_write_page2_reg(qdev, 3001 &local_ram->drbBase, 3002 (qdev->nvram_data.drbTableBaseHi << 16) | 3003 qdev->nvram_data.drbTableBaseLo); 3004 ql_write_page2_reg(qdev, 3005 &local_ram->maxDrbCount, 3006 qdev->nvram_data.drbTableSize); 3007 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); 3008 return 0; 3009 } 3010 3011 static int ql_adapter_initialize(struct ql3_adapter *qdev) 3012 { 3013 u32 value; 3014 struct ql3xxx_port_registers __iomem *port_regs = 3015 qdev->mem_map_registers; 3016 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3017 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3018 (void __iomem *)port_regs; 3019 u32 delay = 10; 3020 int status = 0; 3021 unsigned long hw_flags = 0; 3022 3023 if (ql_mii_setup(qdev)) 3024 return -1; 3025 3026 /* Bring out PHY out of reset */ 3027 ql_write_common_reg(qdev, spir, 3028 (ISP_SERIAL_PORT_IF_WE | 3029 (ISP_SERIAL_PORT_IF_WE << 16))); 3030 /* Give the PHY time to come out of 
reset. */ 3031 mdelay(100); 3032 qdev->port_link_state = LS_DOWN; 3033 netif_carrier_off(qdev->ndev); 3034 3035 /* V2 chip fix for ARS-39168. */ 3036 ql_write_common_reg(qdev, spir, 3037 (ISP_SERIAL_PORT_IF_SDE | 3038 (ISP_SERIAL_PORT_IF_SDE << 16))); 3039 3040 /* Request Queue Registers */ 3041 *((u32 *)(qdev->preq_consumer_index)) = 0; 3042 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3043 qdev->req_producer_index = 0; 3044 3045 ql_write_page1_reg(qdev, 3046 &hmem_regs->reqConsumerIndexAddrHigh, 3047 qdev->req_consumer_index_phy_addr_high); 3048 ql_write_page1_reg(qdev, 3049 &hmem_regs->reqConsumerIndexAddrLow, 3050 qdev->req_consumer_index_phy_addr_low); 3051 3052 ql_write_page1_reg(qdev, 3053 &hmem_regs->reqBaseAddrHigh, 3054 MS_64BITS(qdev->req_q_phy_addr)); 3055 ql_write_page1_reg(qdev, 3056 &hmem_regs->reqBaseAddrLow, 3057 LS_64BITS(qdev->req_q_phy_addr)); 3058 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3059 3060 /* Response Queue Registers */ 3061 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3062 qdev->rsp_consumer_index = 0; 3063 qdev->rsp_current = qdev->rsp_q_virt_addr; 3064 3065 ql_write_page1_reg(qdev, 3066 &hmem_regs->rspProducerIndexAddrHigh, 3067 qdev->rsp_producer_index_phy_addr_high); 3068 3069 ql_write_page1_reg(qdev, 3070 &hmem_regs->rspProducerIndexAddrLow, 3071 qdev->rsp_producer_index_phy_addr_low); 3072 3073 ql_write_page1_reg(qdev, 3074 &hmem_regs->rspBaseAddrHigh, 3075 MS_64BITS(qdev->rsp_q_phy_addr)); 3076 3077 ql_write_page1_reg(qdev, 3078 &hmem_regs->rspBaseAddrLow, 3079 LS_64BITS(qdev->rsp_q_phy_addr)); 3080 3081 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3082 3083 /* Large Buffer Queue */ 3084 ql_write_page1_reg(qdev, 3085 &hmem_regs->rxLargeQBaseAddrHigh, 3086 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3087 3088 ql_write_page1_reg(qdev, 3089 &hmem_regs->rxLargeQBaseAddrLow, 3090 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3091 3092 ql_write_page1_reg(qdev, 3093 &hmem_regs->rxLargeQLength, 3094 qdev->num_lbufq_entries); 3095 3096 ql_write_page1_reg(qdev, 3097 &hmem_regs->rxLargeBufferLength, 3098 qdev->lrg_buffer_len); 3099 3100 /* Small Buffer Queue */ 3101 ql_write_page1_reg(qdev, 3102 &hmem_regs->rxSmallQBaseAddrHigh, 3103 MS_64BITS(qdev->small_buf_q_phy_addr)); 3104 3105 ql_write_page1_reg(qdev, 3106 &hmem_regs->rxSmallQBaseAddrLow, 3107 LS_64BITS(qdev->small_buf_q_phy_addr)); 3108 3109 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3110 ql_write_page1_reg(qdev, 3111 &hmem_regs->rxSmallBufferLength, 3112 QL_SMALL_BUFFER_SIZE); 3113 3114 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3115 qdev->small_buf_release_cnt = 8; 3116 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3117 qdev->lrg_buf_release_cnt = 8; 3118 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3119 qdev->small_buf_index = 0; 3120 qdev->lrg_buf_index = 0; 3121 qdev->lrg_buf_free_count = 0; 3122 qdev->lrg_buf_free_head = NULL; 3123 qdev->lrg_buf_free_tail = NULL; 3124 3125 ql_write_common_reg(qdev, 3126 &port_regs->CommonRegs. 3127 rxSmallQProducerIndex, 3128 qdev->small_buf_q_producer_index); 3129 ql_write_common_reg(qdev, 3130 &port_regs->CommonRegs. 3131 rxLargeQProducerIndex, 3132 qdev->lrg_buf_q_producer_index); 3133 3134 /* 3135 * Find out if the chip has already been initialized. If it has, then 3136 * we skip some of the initialization. 
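 * (The PORT_STATUS_IC bit tested below is set once the
 * configuration-complete handshake at the end of this function has
 * succeeded, e.g. on an earlier bring-up or by the other function
 * sharing the chip.)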
3137 */ 3138 clear_bit(QL_LINK_MASTER, &qdev->flags); 3139 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3140 if ((value & PORT_STATUS_IC) == 0) { 3141 3142 /* Chip has not been configured yet, so let it rip. */ 3143 if (ql_init_misc_registers(qdev)) { 3144 status = -1; 3145 goto out; 3146 } 3147 3148 value = qdev->nvram_data.tcpMaxWindowSize; 3149 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3150 3151 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3152 3153 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3154 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3155 * 2) << 13)) { 3156 status = -1; 3157 goto out; 3158 } 3159 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3160 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3161 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3162 16) | (INTERNAL_CHIP_SD | 3163 INTERNAL_CHIP_WE))); 3164 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3165 } 3166 3167 if (qdev->mac_index) 3168 ql_write_page0_reg(qdev, 3169 &port_regs->mac1MaxFrameLengthReg, 3170 qdev->max_frame_size); 3171 else 3172 ql_write_page0_reg(qdev, 3173 &port_regs->mac0MaxFrameLengthReg, 3174 qdev->max_frame_size); 3175 3176 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3177 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3178 2) << 7)) { 3179 status = -1; 3180 goto out; 3181 } 3182 3183 PHY_Setup(qdev); 3184 ql_init_scan_mode(qdev); 3185 ql_get_phy_owner(qdev); 3186 3187 /* Load the MAC Configuration */ 3188 3189 /* Program lower 32 bits of the MAC address */ 3190 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3191 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3192 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3193 ((qdev->ndev->dev_addr[2] << 24) 3194 | (qdev->ndev->dev_addr[3] << 16) 3195 | (qdev->ndev->dev_addr[4] << 8) 3196 | qdev->ndev->dev_addr[5])); 3197 3198 /* Program top 16 bits of the MAC address */ 3199 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3200 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3201 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3202 ((qdev->ndev->dev_addr[0] << 8) 3203 | qdev->ndev->dev_addr[1])); 3204 3205 /* Enable Primary MAC */ 3206 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3207 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3208 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3209 3210 /* Clear Primary and Secondary IP addresses */ 3211 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3212 ((IP_ADDR_INDEX_REG_MASK << 16) | 3213 (qdev->mac_index << 2))); 3214 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3215 3216 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3217 ((IP_ADDR_INDEX_REG_MASK << 16) | 3218 ((qdev->mac_index << 2) + 1))); 3219 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3220 3221 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3222 3223 /* Indicate Configuration Complete */ 3224 ql_write_page0_reg(qdev, 3225 &port_regs->portControl, 3226 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3227 3228 do { 3229 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3230 if (value & PORT_STATUS_IC) 3231 break; 3232 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3233 msleep(500); 3234 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3235 } while (--delay); 3236 3237 if (delay == 0) { 3238 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3239 status = -1; 3240 goto out; 3241 } 3242 3243 /* Enable Ethernet Function */ 3244 if (qdev->device_id == QL3032_DEVICE_ID) { 3245 value = 3246 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 
3247 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3248 QL3032_PORT_CONTROL_ET);
3249 ql_write_page0_reg(qdev, &port_regs->functionControl,
3250 ((value << 16) | value));
3251 } else {
3252 value =
3253 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3254 PORT_CONTROL_HH);
3255 ql_write_page0_reg(qdev, &port_regs->portControl,
3256 ((value << 16) | value)); 3257 } 3258 3259
3260 out:
3261 return status; 3262 } 3263
3264 /* 3265 * Caller holds hw_lock. 3266 */
3267 static int ql_adapter_reset(struct ql3_adapter *qdev) 3268 {
3269 struct ql3xxx_port_registers __iomem *port_regs =
3270 qdev->mem_map_registers;
3271 int status = 0;
3272 u16 value;
3273 int max_wait_time; 3274
3275 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3276 clear_bit(QL_RESET_DONE, &qdev->flags); 3277
3278 /* 3279 * Issue soft reset to chip. 3280 */
3281 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3282 ql_write_common_reg(qdev,
3283 &port_regs->CommonRegs.ispControlStatus,
3284 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3285
3286 /* Wait up to 5 seconds for the reset to complete. */
3287 netdev_printk(KERN_DEBUG, qdev->ndev,
3288 "Wait up to 5 seconds for reset to complete\n"); 3289
3290 /* Wait until the firmware tells us the Soft Reset is done */
3291 max_wait_time = 5;
3292 do {
3293 value =
3294 ql_read_common_reg(qdev,
3295 &port_regs->CommonRegs.ispControlStatus);
3296 if ((value & ISP_CONTROL_SR) == 0)
3297 break; 3298
3299 ssleep(1);
3300 } while ((--max_wait_time)); 3301
3302 /* 3303 * Also, make sure that the Network Reset Interrupt bit has been
3304 * cleared after the soft reset has taken place. 3305 */
3306 value =
3307 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3308 if (value & ISP_CONTROL_RI) {
3309 netdev_printk(KERN_DEBUG, qdev->ndev,
3310 "clearing RI after reset\n");
3311 ql_write_common_reg(qdev,
3312 &port_regs->CommonRegs.
3313 ispControlStatus,
3314 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3315 } 3316
3317 if (max_wait_time == 0) {
3318 /* Issue Force Soft Reset */
3319 ql_write_common_reg(qdev,
3320 &port_regs->CommonRegs.
3321 ispControlStatus,
3322 ((ISP_CONTROL_FSR << 16) |
3323 ISP_CONTROL_FSR));
3324 /* 3325 * Wait until the firmware tells us the Force Soft Reset is
3326 * done 3327 */
3328 max_wait_time = 5;
3329 do {
3330 value = ql_read_common_reg(qdev,
3331 &port_regs->CommonRegs.
3332 ispControlStatus); 3333 if ((value & ISP_CONTROL_FSR) == 0) 3334 break; 3335 ssleep(1); 3336 } while ((--max_wait_time)); 3337 } 3338 if (max_wait_time == 0) 3339 status = 1; 3340 3341 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3342 set_bit(QL_RESET_DONE, &qdev->flags); 3343 return status; 3344 } 3345 3346 static void ql_set_mac_info(struct ql3_adapter *qdev) 3347 { 3348 struct ql3xxx_port_registers __iomem *port_regs = 3349 qdev->mem_map_registers; 3350 u32 value, port_status; 3351 u8 func_number; 3352 3353 /* Get the function number */ 3354 value = 3355 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3356 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3357 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3358 switch (value & ISP_CONTROL_FN_MASK) { 3359 case ISP_CONTROL_FN0_NET: 3360 qdev->mac_index = 0; 3361 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3362 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3363 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3364 if (port_status & PORT_STATUS_SM0) 3365 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3366 else 3367 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3368 break; 3369 3370 case ISP_CONTROL_FN1_NET: 3371 qdev->mac_index = 1; 3372 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3373 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3374 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3375 if (port_status & PORT_STATUS_SM1) 3376 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3377 else 3378 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3379 break; 3380 3381 case ISP_CONTROL_FN0_SCSI: 3382 case ISP_CONTROL_FN1_SCSI: 3383 default: 3384 netdev_printk(KERN_DEBUG, qdev->ndev, 3385 "Invalid function number, ispControlStatus = 0x%x\n", 3386 value); 3387 break; 3388 } 3389 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3390 } 3391 3392 static void ql_display_dev_info(struct net_device *ndev) 3393 { 3394 struct ql3_adapter *qdev = netdev_priv(ndev); 3395 struct pci_dev *pdev = qdev->pdev; 3396 3397 netdev_info(ndev, 3398 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3399 DRV_NAME, qdev->index, qdev->chip_rev_id, 3400 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3401 qdev->pci_slot); 3402 netdev_info(ndev, "%s Interface\n", 3403 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3404 3405 /* 3406 * Print PCI bus width/type. 3407 */ 3408 netdev_info(ndev, "Bus interface is %s %s\n", 3409 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3410 ((qdev->pci_x) ? 
"PCI-X" : "PCI")); 3411 3412 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3413 qdev->mem_map_registers); 3414 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3415 3416 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3417 } 3418 3419 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3420 { 3421 struct net_device *ndev = qdev->ndev; 3422 int retval = 0; 3423 3424 netif_stop_queue(ndev); 3425 netif_carrier_off(ndev); 3426 3427 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3428 clear_bit(QL_LINK_MASTER, &qdev->flags); 3429 3430 ql_disable_interrupts(qdev); 3431 3432 free_irq(qdev->pdev->irq, ndev); 3433 3434 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3435 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3436 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3437 pci_disable_msi(qdev->pdev); 3438 } 3439 3440 del_timer_sync(&qdev->adapter_timer); 3441 3442 napi_disable(&qdev->napi); 3443 3444 if (do_reset) { 3445 int soft_reset; 3446 unsigned long hw_flags; 3447 3448 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3449 if (ql_wait_for_drvr_lock(qdev)) { 3450 soft_reset = ql_adapter_reset(qdev); 3451 if (soft_reset) { 3452 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3453 qdev->index); 3454 } 3455 netdev_err(ndev, 3456 "Releasing driver lock via chip reset\n"); 3457 } else { 3458 netdev_err(ndev, 3459 "Could not acquire driver lock to do reset!\n"); 3460 retval = -1; 3461 } 3462 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3463 } 3464 ql_free_mem_resources(qdev); 3465 return retval; 3466 } 3467 3468 static int ql_adapter_up(struct ql3_adapter *qdev) 3469 { 3470 struct net_device *ndev = qdev->ndev; 3471 int err; 3472 unsigned long irq_flags = IRQF_SHARED; 3473 unsigned long hw_flags; 3474 3475 if (ql_alloc_mem_resources(qdev)) { 3476 netdev_err(ndev, "Unable to allocate buffers\n"); 3477 return -ENOMEM; 3478 } 3479 3480 if (qdev->msi) { 3481 if (pci_enable_msi(qdev->pdev)) { 3482 netdev_err(ndev, 3483 "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); 3484 qdev->msi = 0; 3485 } else { 3486 netdev_info(ndev, "MSI Enabled...\n"); 3487 set_bit(QL_MSI_ENABLED, &qdev->flags); 3488 irq_flags &= ~IRQF_SHARED; 3489 } 3490 } 3491 3492 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3493 irq_flags, ndev->name, ndev); 3494 if (err) { 3495 netdev_err(ndev, 3496 "Failed to reserve interrupt %d - already in use\n", 3497 qdev->pdev->irq); 3498 goto err_irq; 3499 } 3500 3501 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3502 3503 err = ql_wait_for_drvr_lock(qdev); 3504 if (err) { 3505 err = ql_adapter_initialize(qdev); 3506 if (err) { 3507 netdev_err(ndev, "Unable to initialize adapter\n"); 3508 goto err_init; 3509 } 3510 netdev_err(ndev, "Releasing driver lock\n"); 3511 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3512 } else { 3513 netdev_err(ndev, "Could not acquire driver lock\n"); 3514 goto err_lock; 3515 } 3516 3517 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3518 3519 set_bit(QL_ADAPTER_UP, &qdev->flags); 3520 3521 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3522 3523 napi_enable(&qdev->napi); 3524 ql_enable_interrupts(qdev); 3525 return 0; 3526 3527 err_init: 3528 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3529 err_lock: 3530 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3531 free_irq(qdev->pdev->irq, ndev); 3532 err_irq: 3533 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3534 netdev_info(ndev, "calling pci_disable_msi()\n"); 3535 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3536 pci_disable_msi(qdev->pdev); 3537 } 3538 return err; 3539 } 3540 3541 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3542 { 3543 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3544 netdev_err(qdev->ndev, 3545 "Driver up/down cycle failed, closing device\n"); 3546 rtnl_lock(); 3547 dev_close(qdev->ndev); 3548 rtnl_unlock(); 3549 return -1; 3550 } 3551 return 0; 3552 } 3553 3554 static int ql3xxx_close(struct net_device *ndev) 3555 { 3556 struct ql3_adapter *qdev = netdev_priv(ndev); 3557 3558 /* 3559 * Wait for device to recover from a reset. 3560 * (Rarely happens, but possible.) 
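 * QL_ADAPTER_UP is cleared while a reset cycle is in progress, so poll
 * until it is set again before taking the adapter down.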
3561 */ 3562 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3563 msleep(50); 3564 3565 ql_adapter_down(qdev, QL_DO_RESET); 3566 return 0; 3567 } 3568 3569 static int ql3xxx_open(struct net_device *ndev) 3570 { 3571 struct ql3_adapter *qdev = netdev_priv(ndev); 3572 return ql_adapter_up(qdev); 3573 } 3574 3575 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3576 { 3577 struct ql3_adapter *qdev = netdev_priv(ndev); 3578 struct ql3xxx_port_registers __iomem *port_regs = 3579 qdev->mem_map_registers; 3580 struct sockaddr *addr = p; 3581 unsigned long hw_flags; 3582 3583 if (netif_running(ndev)) 3584 return -EBUSY; 3585 3586 if (!is_valid_ether_addr(addr->sa_data)) 3587 return -EADDRNOTAVAIL; 3588 3589 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3590 3591 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3592 /* Program lower 32 bits of the MAC address */ 3593 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3594 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3595 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3596 ((ndev->dev_addr[2] << 24) | (ndev-> 3597 dev_addr[3] << 16) | 3598 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3599 3600 /* Program top 16 bits of the MAC address */ 3601 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3602 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3603 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3604 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3605 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3606 3607 return 0; 3608 } 3609 3610 static void ql3xxx_tx_timeout(struct net_device *ndev) 3611 { 3612 struct ql3_adapter *qdev = netdev_priv(ndev); 3613 3614 netdev_err(ndev, "Resetting...\n"); 3615 /* 3616 * Stop the queues, we've got a problem. 3617 */ 3618 netif_stop_queue(ndev); 3619 3620 /* 3621 * Wake up the worker to process this event. 3622 */ 3623 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3624 } 3625 3626 static void ql_reset_work(struct work_struct *work) 3627 { 3628 struct ql3_adapter *qdev = 3629 container_of(work, struct ql3_adapter, reset_work.work); 3630 struct net_device *ndev = qdev->ndev; 3631 u32 value; 3632 struct ql_tx_buf_cb *tx_cb; 3633 int max_wait_time, i; 3634 struct ql3xxx_port_registers __iomem *port_regs = 3635 qdev->mem_map_registers; 3636 unsigned long hw_flags; 3637 3638 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3639 clear_bit(QL_LINK_MASTER, &qdev->flags); 3640 3641 /* 3642 * Loop through the active list and return the skb. 3643 */ 3644 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3645 int j; 3646 tx_cb = &qdev->tx_buf[i]; 3647 if (tx_cb->skb) { 3648 netdev_printk(KERN_DEBUG, ndev, 3649 "Freeing lost SKB\n"); 3650 pci_unmap_single(qdev->pdev, 3651 dma_unmap_addr(&tx_cb->map[0], 3652 mapaddr), 3653 dma_unmap_len(&tx_cb->map[0], maplen), 3654 PCI_DMA_TODEVICE); 3655 for (j = 1; j < tx_cb->seg_count; j++) { 3656 pci_unmap_page(qdev->pdev, 3657 dma_unmap_addr(&tx_cb->map[j], 3658 mapaddr), 3659 dma_unmap_len(&tx_cb->map[j], 3660 maplen), 3661 PCI_DMA_TODEVICE); 3662 } 3663 dev_kfree_skb(tx_cb->skb); 3664 tx_cb->skb = NULL; 3665 } 3666 } 3667 3668 netdev_err(ndev, "Clearing NRI after reset\n"); 3669 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3670 ql_write_common_reg(qdev, 3671 &port_regs->CommonRegs. 3672 ispControlStatus, 3673 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3674 /* 3675 * Wait the for Soft Reset to Complete. 
3676 */ 3677 max_wait_time = 10; 3678 do { 3679 value = ql_read_common_reg(qdev, 3680 &port_regs->CommonRegs. 3681 3682 ispControlStatus); 3683 if ((value & ISP_CONTROL_SR) == 0) { 3684 netdev_printk(KERN_DEBUG, ndev, 3685 "reset completed\n"); 3686 break; 3687 } 3688 3689 if (value & ISP_CONTROL_RI) { 3690 netdev_printk(KERN_DEBUG, ndev, 3691 "clearing NRI after reset\n"); 3692 ql_write_common_reg(qdev, 3693 &port_regs-> 3694 CommonRegs. 3695 ispControlStatus, 3696 ((ISP_CONTROL_RI << 3697 16) | ISP_CONTROL_RI)); 3698 } 3699 3700 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3701 ssleep(1); 3702 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3703 } while (--max_wait_time); 3704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3705 3706 if (value & ISP_CONTROL_SR) { 3707 3708 /* 3709 * Set the reset flags and clear the board again. 3710 * Nothing else to do... 3711 */ 3712 netdev_err(ndev, 3713 "Timed out waiting for reset to complete\n"); 3714 netdev_err(ndev, "Do a reset\n"); 3715 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3716 clear_bit(QL_RESET_START, &qdev->flags); 3717 ql_cycle_adapter(qdev, QL_DO_RESET); 3718 return; 3719 } 3720 3721 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3722 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3723 clear_bit(QL_RESET_START, &qdev->flags); 3724 ql_cycle_adapter(qdev, QL_NO_RESET); 3725 } 3726 } 3727 3728 static void ql_tx_timeout_work(struct work_struct *work) 3729 { 3730 struct ql3_adapter *qdev = 3731 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3732 3733 ql_cycle_adapter(qdev, QL_DO_RESET); 3734 } 3735 3736 static void ql_get_board_info(struct ql3_adapter *qdev) 3737 { 3738 struct ql3xxx_port_registers __iomem *port_regs = 3739 qdev->mem_map_registers; 3740 u32 value; 3741 3742 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3743 3744 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3745 if (value & PORT_STATUS_64) 3746 qdev->pci_width = 64; 3747 else 3748 qdev->pci_width = 32; 3749 if (value & PORT_STATUS_X) 3750 qdev->pci_x = 1; 3751 else 3752 qdev->pci_x = 0; 3753 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3754 } 3755 3756 static void ql3xxx_timer(unsigned long ptr) 3757 { 3758 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; 3759 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3760 } 3761 3762 static const struct net_device_ops ql3xxx_netdev_ops = { 3763 .ndo_open = ql3xxx_open, 3764 .ndo_start_xmit = ql3xxx_send, 3765 .ndo_stop = ql3xxx_close, 3766 .ndo_change_mtu = eth_change_mtu, 3767 .ndo_validate_addr = eth_validate_addr, 3768 .ndo_set_mac_address = ql3xxx_set_mac_address, 3769 .ndo_tx_timeout = ql3xxx_tx_timeout, 3770 }; 3771 3772 static int __devinit ql3xxx_probe(struct pci_dev *pdev, 3773 const struct pci_device_id *pci_entry) 3774 { 3775 struct net_device *ndev = NULL; 3776 struct ql3_adapter *qdev = NULL; 3777 static int cards_found; 3778 int uninitialized_var(pci_using_dac), err; 3779 3780 err = pci_enable_device(pdev); 3781 if (err) { 3782 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3783 goto err_out; 3784 } 3785 3786 err = pci_request_regions(pdev, DRV_NAME); 3787 if (err) { 3788 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3789 goto err_out_disable_pdev; 3790 } 3791 3792 pci_set_master(pdev); 3793 3794 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3795 pci_using_dac = 1; 3796 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3797 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3798 pci_using_dac = 0; 3799 
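/* 64-bit DMA is not available; fall back to 32-bit addressing. */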
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3800 } 3801 3802 if (err) { 3803 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3804 goto err_out_free_regions; 3805 } 3806 3807 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3808 if (!ndev) { 3809 pr_err("%s could not alloc etherdev\n", pci_name(pdev)); 3810 err = -ENOMEM; 3811 goto err_out_free_regions; 3812 } 3813 3814 SET_NETDEV_DEV(ndev, &pdev->dev); 3815 3816 pci_set_drvdata(pdev, ndev); 3817 3818 qdev = netdev_priv(ndev); 3819 qdev->index = cards_found; 3820 qdev->ndev = ndev; 3821 qdev->pdev = pdev; 3822 qdev->device_id = pci_entry->device; 3823 qdev->port_link_state = LS_DOWN; 3824 if (msi) 3825 qdev->msi = 1; 3826 3827 qdev->msg_enable = netif_msg_init(debug, default_msg); 3828 3829 if (pci_using_dac) 3830 ndev->features |= NETIF_F_HIGHDMA; 3831 if (qdev->device_id == QL3032_DEVICE_ID) 3832 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3833 3834 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3835 if (!qdev->mem_map_registers) { 3836 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3837 err = -EIO; 3838 goto err_out_free_ndev; 3839 } 3840 3841 spin_lock_init(&qdev->adapter_lock); 3842 spin_lock_init(&qdev->hw_lock); 3843 3844 /* Set driver entry points */ 3845 ndev->netdev_ops = &ql3xxx_netdev_ops; 3846 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3847 ndev->watchdog_timeo = 5 * HZ; 3848 3849 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3850 3851 ndev->irq = pdev->irq; 3852 3853 /* make sure the EEPROM is good */ 3854 if (ql_get_nvram_params(qdev)) { 3855 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3856 __func__, qdev->index); 3857 err = -EIO; 3858 goto err_out_iounmap; 3859 } 3860 3861 ql_set_mac_info(qdev); 3862 3863 /* Validate and set parameters */ 3864 if (qdev->mac_index) { 3865 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3866 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3867 } else { 3868 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3869 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3870 } 3871 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 3872 3873 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3874 3875 /* Record PCI bus information. */ 3876 ql_get_board_info(qdev); 3877 3878 /* 3879 * Set the Maximum Memory Read Byte Count value. We do this to handle 3880 * jumbo frames. 
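 * The config-space write below is only performed when the adapter is
 * operating in PCI-X mode.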
3881 */ 3882 if (qdev->pci_x) 3883 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3884 3885 err = register_netdev(ndev); 3886 if (err) { 3887 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3888 goto err_out_iounmap; 3889 } 3890 3891 /* we're going to reset, so assume we have no link for now */ 3892 3893 netif_carrier_off(ndev); 3894 netif_stop_queue(ndev); 3895 3896 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3897 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3898 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3899 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3900 3901 init_timer(&qdev->adapter_timer); 3902 qdev->adapter_timer.function = ql3xxx_timer; 3903 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3904 qdev->adapter_timer.data = (unsigned long)qdev; 3905 3906 if (!cards_found) { 3907 pr_alert("%s\n", DRV_STRING); 3908 pr_alert("Driver name: %s, Version: %s\n", 3909 DRV_NAME, DRV_VERSION); 3910 } 3911 ql_display_dev_info(ndev); 3912 3913 cards_found++; 3914 return 0; 3915 3916 err_out_iounmap: 3917 iounmap(qdev->mem_map_registers); 3918 err_out_free_ndev: 3919 free_netdev(ndev); 3920 err_out_free_regions: 3921 pci_release_regions(pdev); 3922 err_out_disable_pdev: 3923 pci_disable_device(pdev); 3924 pci_set_drvdata(pdev, NULL); 3925 err_out: 3926 return err; 3927 } 3928 3929 static void __devexit ql3xxx_remove(struct pci_dev *pdev) 3930 { 3931 struct net_device *ndev = pci_get_drvdata(pdev); 3932 struct ql3_adapter *qdev = netdev_priv(ndev); 3933 3934 unregister_netdev(ndev); 3935 3936 ql_disable_interrupts(qdev); 3937 3938 if (qdev->workqueue) { 3939 cancel_delayed_work(&qdev->reset_work); 3940 cancel_delayed_work(&qdev->tx_timeout_work); 3941 destroy_workqueue(qdev->workqueue); 3942 qdev->workqueue = NULL; 3943 } 3944 3945 iounmap(qdev->mem_map_registers); 3946 pci_release_regions(pdev); 3947 pci_set_drvdata(pdev, NULL); 3948 free_netdev(ndev); 3949 } 3950 3951 static struct pci_driver ql3xxx_driver = { 3952 3953 .name = DRV_NAME, 3954 .id_table = ql3xxx_pci_tbl, 3955 .probe = ql3xxx_probe, 3956 .remove = __devexit_p(ql3xxx_remove), 3957 }; 3958 3959 static int __init ql3xxx_init_module(void) 3960 { 3961 return pci_register_driver(&ql3xxx_driver); 3962 } 3963 3964 static void __exit ql3xxx_exit(void) 3965 { 3966 pci_unregister_driver(&ql3xxx_driver); 3967 } 3968 3969 module_init(ql3xxx_init_module); 3970 module_exit(ql3xxx_exit); 3971