/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG \
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE phyDevice;
	const u32 phyIdOUI;
	const u16 phyIdModel;
	const char *name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};


/*
 * Caller must take hw_lock.
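 * (Added note: ql_sem_spinlock() below retries the hardware semaphore
 * roughly once a second for up to three seconds, while ql_sem_lock()
 * further down is the single-shot, non-blocking variant.)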
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE |
				 (qdev->mac_index) * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}
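
/*
 * (Added note: the ql_write_page0/1/2_reg() helpers below mirror the
 * page-0 read accessors above -- they switch to the requested register
 * page if needed, issue the MMIO write, and read the register back to
 * flush the posted write.)
 */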

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
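 * (Added note: fm93c56a_select() asserts the EEPROM chip select through
 * the serial port interface register; fm93c56a_deselect() further below
 * drops it again once the read completes.)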
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, spir,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it.
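	 * (Added note: each pass of the loop below pulses the EEPROM clock
	 * once and samples the DI pin, shifting the result into 'data'
	 * MSB first.)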
	 */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev,
			      &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete.
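	 * (Added note: "complete" here means the MII management interface
	 * has cleared its busy bit, as polled by ql_wait_for_mii_ready().)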
	 */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);

}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev,
			    0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using,
		   based on the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
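 * (Added note: ql_mac_cfg_pause() follows the same pattern as the MAC
 * config helpers above -- the pause bits are written together with
 * their mask in the upper 16 bits of the config register.)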
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
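 * (Added note: ql_link_down_detect() reports the latched link-down bit
 * from ispControlStatus; ql_link_down_detect_clear() below writes the
 * same bit back, with its mask, to clear the indication.)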
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and need to be
	   reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration &
	    PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
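			 * (Added note: the PHY/GIO semaphore is therefore
			 * released here before the port is restarted.)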
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 2 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 2 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
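 * (Added note: ql_mii_setup() is called with the PHY/GIO semaphore taken
 * via ql_sem_spinlock() and releases it before returning.)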
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full | \
				 SUPPORTED_FIBRE | \
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half | \
				 SUPPORTED_10baseT_Full | \
				 SUPPORTED_100baseT_Half | \
				 SUPPORTED_100baseT_Full | \
				 SUPPORTED_1000baseT_Half | \
				 SUPPORTED_1000baseT_Full | \
				 SUPPORTED_Autoneg | \
				 SUPPORTED_TP) \

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
	ecmd->duplex =
		ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	u32 reg;
	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}


				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
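 * (Added note: the producer index below advances once for every eight
 * released small buffers -- small_buf_release_cnt is decremented by 8
 * per step -- and the new index is written to the chip only after the
 * loop, behind a write barrier.)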
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

static void
ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */

	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here.
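	 * (Added note: for the 3022, the Ethernet/VLAN header from the
	 * first buffer is pushed onto this skb below, which is why
	 * QL_HEADER_SPACE was reserved when the buffer was allocated.)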
*/ 2066 pci_unmap_single(qdev->pdev, 2067 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2068 dma_unmap_len(lrg_buf_cb2, maplen), 2069 PCI_DMA_FROMDEVICE); 2070 prefetch(skb2->data); 2071 2072 skb_checksum_none_assert(skb2); 2073 if (qdev->device_id == QL3022_DEVICE_ID) { 2074 /* 2075 * Copy the ethhdr from first buffer to second. This 2076 * is necessary for 3022 IP completions. 2077 */ 2078 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2079 skb_push(skb2, size), size); 2080 } else { 2081 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2082 if (checksum & 2083 (IB_IP_IOCB_RSP_3032_ICE | 2084 IB_IP_IOCB_RSP_3032_CE)) { 2085 netdev_err(ndev, 2086 "%s: Bad checksum for this %s packet, checksum = %x\n", 2087 __func__, 2088 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2089 "TCP" : "UDP"), checksum); 2090 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2091 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2092 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2093 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2094 } 2095 } 2096 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2097 2098 netif_receive_skb(skb2); 2099 ndev->stats.rx_packets++; 2100 ndev->stats.rx_bytes += length; 2101 lrg_buf_cb2->skb = NULL; 2102 2103 if (qdev->device_id == QL3022_DEVICE_ID) 2104 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2105 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2106 } 2107 2108 static int ql_tx_rx_clean(struct ql3_adapter *qdev, 2109 int *tx_cleaned, int *rx_cleaned, int work_to_do) 2110 { 2111 struct net_rsp_iocb *net_rsp; 2112 struct net_device *ndev = qdev->ndev; 2113 int work_done = 0; 2114 2115 /* While there are entries in the completion queue. */ 2116 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2117 qdev->rsp_consumer_index) && (work_done < work_to_do)) { 2118 2119 net_rsp = qdev->rsp_current; 2120 rmb(); 2121 /* 2122 * Fix 4032 chip's undocumented "feature" where bit-8 is set 2123 * if the inbound completion is for a VLAN. 
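 * Masking the opcode with 0x7f below strips that bit so the opcode
 * switch only has to match the base IOCB opcode values.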
2124 */ 2125 if (qdev->device_id == QL3032_DEVICE_ID) 2126 net_rsp->opcode &= 0x7f; 2127 switch (net_rsp->opcode) { 2128 2129 case OPCODE_OB_MAC_IOCB_FN0: 2130 case OPCODE_OB_MAC_IOCB_FN2: 2131 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) 2132 net_rsp); 2133 (*tx_cleaned)++; 2134 break; 2135 2136 case OPCODE_IB_MAC_IOCB: 2137 case OPCODE_IB_3032_MAC_IOCB: 2138 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 2139 net_rsp); 2140 (*rx_cleaned)++; 2141 break; 2142 2143 case OPCODE_IB_IP_IOCB: 2144 case OPCODE_IB_3032_IP_IOCB: 2145 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 2146 net_rsp); 2147 (*rx_cleaned)++; 2148 break; 2149 default: { 2150 u32 *tmp = (u32 *)net_rsp; 2151 netdev_err(ndev, 2152 "Hit default case, not handled!\n" 2153 " dropping the packet, opcode = %x\n" 2154 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2155 net_rsp->opcode, 2156 (unsigned long int)tmp[0], 2157 (unsigned long int)tmp[1], 2158 (unsigned long int)tmp[2], 2159 (unsigned long int)tmp[3]); 2160 } 2161 } 2162 2163 qdev->rsp_consumer_index++; 2164 2165 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { 2166 qdev->rsp_consumer_index = 0; 2167 qdev->rsp_current = qdev->rsp_q_virt_addr; 2168 } else { 2169 qdev->rsp_current++; 2170 } 2171 2172 work_done = *tx_cleaned + *rx_cleaned; 2173 } 2174 2175 return work_done; 2176 } 2177 2178 static int ql_poll(struct napi_struct *napi, int budget) 2179 { 2180 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2181 int rx_cleaned = 0, tx_cleaned = 0; 2182 unsigned long hw_flags; 2183 struct ql3xxx_port_registers __iomem *port_regs = 2184 qdev->mem_map_registers; 2185 2186 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); 2187 2188 if (tx_cleaned + rx_cleaned != budget) { 2189 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2190 __napi_complete(napi); 2191 ql_update_small_bufq_prod_index(qdev); 2192 ql_update_lrg_bufq_prod_index(qdev); 2193 writel(qdev->rsp_consumer_index, 2194 &port_regs->CommonRegs.rspQConsumerIndex); 2195 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 2196 2197 ql_enable_interrupts(qdev); 2198 } 2199 return tx_cleaned + rx_cleaned; 2200 } 2201 2202 static irqreturn_t ql3xxx_isr(int irq, void *dev_id) 2203 { 2204 2205 struct net_device *ndev = dev_id; 2206 struct ql3_adapter *qdev = netdev_priv(ndev); 2207 struct ql3xxx_port_registers __iomem *port_regs = 2208 qdev->mem_map_registers; 2209 u32 value; 2210 int handled = 1; 2211 u32 var; 2212 2213 value = ql_read_common_reg_l(qdev, 2214 &port_regs->CommonRegs.ispControlStatus); 2215 2216 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2217 spin_lock(&qdev->adapter_lock); 2218 netif_stop_queue(qdev->ndev); 2219 netif_carrier_off(qdev->ndev); 2220 ql_disable_interrupts(qdev); 2221 qdev->port_link_state = LS_DOWN; 2222 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; 2223 2224 if (value & ISP_CONTROL_FE) { 2225 /* 2226 * Chip Fatal Error. 2227 */ 2228 var = 2229 ql_read_page0_reg_l(qdev, 2230 &port_regs->PortFatalErrStatus); 2231 netdev_warn(ndev, 2232 "Resetting chip. PortFatalErrStatus register = 0x%x\n", 2233 var); 2234 set_bit(QL_RESET_START, &qdev->flags) ; 2235 } else { 2236 /* 2237 * Soft Reset Requested. 2238 */ 2239 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; 2240 netdev_err(ndev, 2241 "Another function issued a reset to the chip. 
ISR value = %x\n", 2242 value); 2243 } 2244 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2245 spin_unlock(&qdev->adapter_lock); 2246 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2247 ql_disable_interrupts(qdev); 2248 if (likely(napi_schedule_prep(&qdev->napi))) 2249 __napi_schedule(&qdev->napi); 2250 } else 2251 return IRQ_NONE; 2252 2253 return IRQ_RETVAL(handled); 2254 } 2255 2256 /* 2257 * Get the total number of segments needed for the given number of fragments. 2258 * This is necessary because outbound address lists (OAL) will be used when 2259 * more than two frags are given. Each address list has 5 addr/len pairs. 2260 * The 5th pair in each OAL is used to point to the next OAL if more frags 2261 * are coming. That is why the frags:segment count ratio is not linear. 2262 */ 2263 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2264 { 2265 if (qdev->device_id == QL3022_DEVICE_ID) 2266 return 1; 2267 2268 if (frags <= 2) 2269 return frags + 1; 2270 else if (frags <= 6) 2271 return frags + 2; 2272 else if (frags <= 10) 2273 return frags + 3; 2274 else if (frags <= 14) 2275 return frags + 4; 2276 else if (frags <= 18) 2277 return frags + 5; 2278 return -1; 2279 } 2280 2281 static void ql_hw_csum_setup(const struct sk_buff *skb, 2282 struct ob_mac_iocb_req *mac_iocb_ptr) 2283 { 2284 const struct iphdr *ip = ip_hdr(skb); 2285 2286 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2287 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2288 2289 if (ip->protocol == IPPROTO_TCP) { 2290 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2291 OB_3032MAC_IOCB_REQ_IC; 2292 } else { 2293 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2294 OB_3032MAC_IOCB_REQ_IC; 2295 } 2296 2297 } 2298 2299 /* 2300 * Map the buffers for this transmit. 2301 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2302 */ 2303 static int ql_send_map(struct ql3_adapter *qdev, 2304 struct ob_mac_iocb_req *mac_iocb_ptr, 2305 struct ql_tx_buf_cb *tx_cb, 2306 struct sk_buff *skb) 2307 { 2308 struct oal *oal; 2309 struct oal_entry *oal_entry; 2310 int len = skb_headlen(skb); 2311 dma_addr_t map; 2312 int err; 2313 int completed_segs, i; 2314 int seg_cnt, seg = 0; 2315 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2316 2317 seg_cnt = tx_cb->seg_count; 2318 /* 2319 * Map the skb buffer first. 2320 */ 2321 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2322 2323 err = pci_dma_mapping_error(qdev->pdev, map); 2324 if (err) { 2325 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2326 err); 2327 2328 return NETDEV_TX_BUSY; 2329 } 2330 2331 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2332 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2333 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2334 oal_entry->len = cpu_to_le32(len); 2335 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2336 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2337 seg++; 2338 2339 if (seg_cnt == 1) { 2340 /* Terminate the last segment. */ 2341 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2342 return NETDEV_TX_OK; 2343 } 2344 oal = tx_cb->oal; 2345 for (completed_segs = 0; 2346 completed_segs < frag_cnt; 2347 completed_segs++, seg++) { 2348 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2349 oal_entry++; 2350 /* 2351 * Check for continuation requirements. 2352 * It's strange but necessary. 2353 * Continuation entry points to outbound address list. 
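 * Segments 2, 7, 12 and 17 are the last ALP of the IOCB or of an OAL;
 * when more fragments are still outstanding, that slot is consumed as
 * an OAL_CONT_ENTRY pointing at the next (DMA-mapped) OAL instead of
 * holding fragment data, which is what the index checks below handle.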
2354 */ 2355 if ((seg == 2 && seg_cnt > 3) || 2356 (seg == 7 && seg_cnt > 8) || 2357 (seg == 12 && seg_cnt > 13) || 2358 (seg == 17 && seg_cnt > 18)) { 2359 map = pci_map_single(qdev->pdev, oal, 2360 sizeof(struct oal), 2361 PCI_DMA_TODEVICE); 2362 2363 err = pci_dma_mapping_error(qdev->pdev, map); 2364 if (err) { 2365 netdev_err(qdev->ndev, 2366 "PCI mapping outbound address list with error: %d\n", 2367 err); 2368 goto map_error; 2369 } 2370 2371 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2372 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2373 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2374 OAL_CONT_ENTRY); 2375 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2376 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2377 sizeof(struct oal)); 2378 oal_entry = (struct oal_entry *)oal; 2379 oal++; 2380 seg++; 2381 } 2382 2383 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2384 DMA_TO_DEVICE); 2385 2386 err = dma_mapping_error(&qdev->pdev->dev, map); 2387 if (err) { 2388 netdev_err(qdev->ndev, 2389 "PCI mapping frags failed with error: %d\n", 2390 err); 2391 goto map_error; 2392 } 2393 2394 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2395 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2396 oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2397 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2398 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2399 } 2400 /* Terminate the last segment. */ 2401 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2402 return NETDEV_TX_OK; 2403 2404 map_error: 2405 /* A PCI mapping failed and now we will need to back out 2406 * We need to traverse through the oal's and associated pages which 2407 * have been mapped and now we must unmap them to clean up properly 2408 */ 2409 2410 seg = 1; 2411 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2412 oal = tx_cb->oal; 2413 for (i = 0; i < completed_segs; i++, seg++) { 2414 oal_entry++; 2415 2416 /* 2417 * Check for continuation requirements. 2418 * It's strange but necessary. 2419 */ 2420 2421 if ((seg == 2 && seg_cnt > 3) || 2422 (seg == 7 && seg_cnt > 8) || 2423 (seg == 12 && seg_cnt > 13) || 2424 (seg == 17 && seg_cnt > 18)) { 2425 pci_unmap_single(qdev->pdev, 2426 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2427 dma_unmap_len(&tx_cb->map[seg], maplen), 2428 PCI_DMA_TODEVICE); 2429 oal++; 2430 seg++; 2431 } 2432 2433 pci_unmap_page(qdev->pdev, 2434 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2435 dma_unmap_len(&tx_cb->map[seg], maplen), 2436 PCI_DMA_TODEVICE); 2437 } 2438 2439 pci_unmap_single(qdev->pdev, 2440 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2441 dma_unmap_addr(&tx_cb->map[0], maplen), 2442 PCI_DMA_TODEVICE); 2443 2444 return NETDEV_TX_BUSY; 2445 2446 } 2447 2448 /* 2449 * The difference between 3022 and 3032 sends: 2450 * 3022 only supports a simple single segment transmission. 2451 * 3032 supports checksumming and scatter/gather lists (fragments). 2452 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2453 * in the IOCB plus a chain of outbound address lists (OAL) that 2454 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2455 * will be used to point to an OAL when more ALP entries are required. 2456 * The IOCB is always the top of the chain followed by one or more 2457 * OALs (when necessary). 
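 * Purely as an illustration (3032 case): an skb with 4 page fragments
 * needs ql_get_seg_count() = 4 + 2 = 6 segments. The linear data and
 * fragment 0 occupy the first two IOCB ALPs, the third IOCB ALP is
 * turned into a continuation entry pointing at a single OAL, and
 * fragments 1-3 land in that OAL.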
2458 */ 2459 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2460 struct net_device *ndev) 2461 { 2462 struct ql3_adapter *qdev = netdev_priv(ndev); 2463 struct ql3xxx_port_registers __iomem *port_regs = 2464 qdev->mem_map_registers; 2465 struct ql_tx_buf_cb *tx_cb; 2466 u32 tot_len = skb->len; 2467 struct ob_mac_iocb_req *mac_iocb_ptr; 2468 2469 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2470 return NETDEV_TX_BUSY; 2471 2472 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2473 tx_cb->seg_count = ql_get_seg_count(qdev, 2474 skb_shinfo(skb)->nr_frags); 2475 if (tx_cb->seg_count == -1) { 2476 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2477 return NETDEV_TX_OK; 2478 } 2479 2480 mac_iocb_ptr = tx_cb->queue_entry; 2481 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2482 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2483 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2484 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2485 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2486 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2487 tx_cb->skb = skb; 2488 if (qdev->device_id == QL3032_DEVICE_ID && 2489 skb->ip_summed == CHECKSUM_PARTIAL) 2490 ql_hw_csum_setup(skb, mac_iocb_ptr); 2491 2492 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2493 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2494 return NETDEV_TX_BUSY; 2495 } 2496 2497 wmb(); 2498 qdev->req_producer_index++; 2499 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2500 qdev->req_producer_index = 0; 2501 wmb(); 2502 ql_write_common_reg_l(qdev, 2503 &port_regs->CommonRegs.reqQProducerIndex, 2504 qdev->req_producer_index); 2505 2506 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2507 "tx queued, slot %d, len %d\n", 2508 qdev->req_producer_index, skb->len); 2509 2510 atomic_dec(&qdev->tx_count); 2511 return NETDEV_TX_OK; 2512 } 2513 2514 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2515 { 2516 qdev->req_q_size = 2517 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2518 2519 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2520 2521 /* The barrier is required to ensure request and response queue 2522 * addr writes to the registers. 
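 * The coherent blocks allocated below are also expected to come back
 * aligned to their own size; the LS_64BITS(addr) & (size - 1) checks
 * treat a misaligned queue as an allocation failure.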
2523 */ 2524 wmb(); 2525 2526 qdev->req_q_virt_addr = 2527 pci_alloc_consistent(qdev->pdev, 2528 (size_t) qdev->req_q_size, 2529 &qdev->req_q_phy_addr); 2530 2531 if ((qdev->req_q_virt_addr == NULL) || 2532 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2533 netdev_err(qdev->ndev, "reqQ failed\n"); 2534 return -ENOMEM; 2535 } 2536 2537 qdev->rsp_q_virt_addr = 2538 pci_alloc_consistent(qdev->pdev, 2539 (size_t) qdev->rsp_q_size, 2540 &qdev->rsp_q_phy_addr); 2541 2542 if ((qdev->rsp_q_virt_addr == NULL) || 2543 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2544 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2545 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2546 qdev->req_q_virt_addr, 2547 qdev->req_q_phy_addr); 2548 return -ENOMEM; 2549 } 2550 2551 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2552 2553 return 0; 2554 } 2555 2556 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2557 { 2558 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2559 netdev_info(qdev->ndev, "Already done\n"); 2560 return; 2561 } 2562 2563 pci_free_consistent(qdev->pdev, 2564 qdev->req_q_size, 2565 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2566 2567 qdev->req_q_virt_addr = NULL; 2568 2569 pci_free_consistent(qdev->pdev, 2570 qdev->rsp_q_size, 2571 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2572 2573 qdev->rsp_q_virt_addr = NULL; 2574 2575 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2576 } 2577 2578 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2579 { 2580 /* Create Large Buffer Queue */ 2581 qdev->lrg_buf_q_size = 2582 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2583 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2584 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2585 else 2586 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2587 2588 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, 2589 sizeof(struct ql_rcv_buf_cb), 2590 GFP_KERNEL); 2591 if (qdev->lrg_buf == NULL) 2592 return -ENOMEM; 2593 2594 qdev->lrg_buf_q_alloc_virt_addr = 2595 pci_alloc_consistent(qdev->pdev, 2596 qdev->lrg_buf_q_alloc_size, 2597 &qdev->lrg_buf_q_alloc_phy_addr); 2598 2599 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2600 netdev_err(qdev->ndev, "lBufQ failed\n"); 2601 return -ENOMEM; 2602 } 2603 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2604 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2605 2606 /* Create Small Buffer Queue */ 2607 qdev->small_buf_q_size = 2608 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2609 if (qdev->small_buf_q_size < PAGE_SIZE) 2610 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2611 else 2612 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2613 2614 qdev->small_buf_q_alloc_virt_addr = 2615 pci_alloc_consistent(qdev->pdev, 2616 qdev->small_buf_q_alloc_size, 2617 &qdev->small_buf_q_alloc_phy_addr); 2618 2619 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2620 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); 2621 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2622 qdev->lrg_buf_q_alloc_virt_addr, 2623 qdev->lrg_buf_q_alloc_phy_addr); 2624 return -ENOMEM; 2625 } 2626 2627 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2628 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2629 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2630 return 0; 2631 } 2632 2633 static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2634 { 2635 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { 2636 netdev_info(qdev->ndev, 
"Already done\n"); 2637 return; 2638 } 2639 kfree(qdev->lrg_buf); 2640 pci_free_consistent(qdev->pdev, 2641 qdev->lrg_buf_q_alloc_size, 2642 qdev->lrg_buf_q_alloc_virt_addr, 2643 qdev->lrg_buf_q_alloc_phy_addr); 2644 2645 qdev->lrg_buf_q_virt_addr = NULL; 2646 2647 pci_free_consistent(qdev->pdev, 2648 qdev->small_buf_q_alloc_size, 2649 qdev->small_buf_q_alloc_virt_addr, 2650 qdev->small_buf_q_alloc_phy_addr); 2651 2652 qdev->small_buf_q_virt_addr = NULL; 2653 2654 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2655 } 2656 2657 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2658 { 2659 int i; 2660 struct bufq_addr_element *small_buf_q_entry; 2661 2662 /* Currently we allocate on one of memory and use it for smallbuffers */ 2663 qdev->small_buf_total_size = 2664 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2665 QL_SMALL_BUFFER_SIZE); 2666 2667 qdev->small_buf_virt_addr = 2668 pci_alloc_consistent(qdev->pdev, 2669 qdev->small_buf_total_size, 2670 &qdev->small_buf_phy_addr); 2671 2672 if (qdev->small_buf_virt_addr == NULL) { 2673 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); 2674 return -ENOMEM; 2675 } 2676 2677 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); 2678 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2679 2680 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2681 2682 /* Initialize the small buffer queue. */ 2683 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2684 small_buf_q_entry->addr_high = 2685 cpu_to_le32(qdev->small_buf_phy_addr_high); 2686 small_buf_q_entry->addr_low = 2687 cpu_to_le32(qdev->small_buf_phy_addr_low + 2688 (i * QL_SMALL_BUFFER_SIZE)); 2689 small_buf_q_entry++; 2690 } 2691 qdev->small_buf_index = 0; 2692 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2693 return 0; 2694 } 2695 2696 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2697 { 2698 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2699 netdev_info(qdev->ndev, "Already done\n"); 2700 return; 2701 } 2702 if (qdev->small_buf_virt_addr != NULL) { 2703 pci_free_consistent(qdev->pdev, 2704 qdev->small_buf_total_size, 2705 qdev->small_buf_virt_addr, 2706 qdev->small_buf_phy_addr); 2707 2708 qdev->small_buf_virt_addr = NULL; 2709 } 2710 } 2711 2712 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2713 { 2714 int i = 0; 2715 struct ql_rcv_buf_cb *lrg_buf_cb; 2716 2717 for (i = 0; i < qdev->num_large_buffers; i++) { 2718 lrg_buf_cb = &qdev->lrg_buf[i]; 2719 if (lrg_buf_cb->skb) { 2720 dev_kfree_skb(lrg_buf_cb->skb); 2721 pci_unmap_single(qdev->pdev, 2722 dma_unmap_addr(lrg_buf_cb, mapaddr), 2723 dma_unmap_len(lrg_buf_cb, maplen), 2724 PCI_DMA_FROMDEVICE); 2725 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2726 } else { 2727 break; 2728 } 2729 } 2730 } 2731 2732 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2733 { 2734 int i; 2735 struct ql_rcv_buf_cb *lrg_buf_cb; 2736 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2737 2738 for (i = 0; i < qdev->num_large_buffers; i++) { 2739 lrg_buf_cb = &qdev->lrg_buf[i]; 2740 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2741 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2742 buf_addr_ele++; 2743 } 2744 qdev->lrg_buf_index = 0; 2745 qdev->lrg_buf_skb_check = 0; 2746 } 2747 2748 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2749 { 2750 int i; 2751 struct ql_rcv_buf_cb *lrg_buf_cb; 2752 struct sk_buff *skb; 2753 dma_addr_t map; 2754 int err; 2755 2756 for (i = 0; i < 
qdev->num_large_buffers; i++) { 2757 skb = netdev_alloc_skb(qdev->ndev, 2758 qdev->lrg_buffer_len); 2759 if (unlikely(!skb)) { 2760 /* Better luck next round */ 2761 netdev_err(qdev->ndev, 2762 "large buff alloc failed for %d bytes at index %d\n", 2763 qdev->lrg_buffer_len * 2, i); 2764 ql_free_large_buffers(qdev); 2765 return -ENOMEM; 2766 } else { 2767 2768 lrg_buf_cb = &qdev->lrg_buf[i]; 2769 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2770 lrg_buf_cb->index = i; 2771 lrg_buf_cb->skb = skb; 2772 /* 2773 * We save some space to copy the ethhdr from first 2774 * buffer 2775 */ 2776 skb_reserve(skb, QL_HEADER_SPACE); 2777 map = pci_map_single(qdev->pdev, 2778 skb->data, 2779 qdev->lrg_buffer_len - 2780 QL_HEADER_SPACE, 2781 PCI_DMA_FROMDEVICE); 2782 2783 err = pci_dma_mapping_error(qdev->pdev, map); 2784 if (err) { 2785 netdev_err(qdev->ndev, 2786 "PCI mapping failed with error: %d\n", 2787 err); 2788 ql_free_large_buffers(qdev); 2789 return -ENOMEM; 2790 } 2791 2792 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2793 dma_unmap_len_set(lrg_buf_cb, maplen, 2794 qdev->lrg_buffer_len - 2795 QL_HEADER_SPACE); 2796 lrg_buf_cb->buf_phy_addr_low = 2797 cpu_to_le32(LS_64BITS(map)); 2798 lrg_buf_cb->buf_phy_addr_high = 2799 cpu_to_le32(MS_64BITS(map)); 2800 } 2801 } 2802 return 0; 2803 } 2804 2805 static void ql_free_send_free_list(struct ql3_adapter *qdev) 2806 { 2807 struct ql_tx_buf_cb *tx_cb; 2808 int i; 2809 2810 tx_cb = &qdev->tx_buf[0]; 2811 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2812 kfree(tx_cb->oal); 2813 tx_cb->oal = NULL; 2814 tx_cb++; 2815 } 2816 } 2817 2818 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2819 { 2820 struct ql_tx_buf_cb *tx_cb; 2821 int i; 2822 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2823 2824 /* Create free list of transmit buffers */ 2825 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2826 2827 tx_cb = &qdev->tx_buf[i]; 2828 tx_cb->skb = NULL; 2829 tx_cb->queue_entry = req_q_curr; 2830 req_q_curr++; 2831 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2832 if (tx_cb->oal == NULL) 2833 return -ENOMEM; 2834 } 2835 return 0; 2836 } 2837 2838 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2839 { 2840 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2841 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2842 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2843 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2844 /* 2845 * Bigger buffers, so less of them. 2846 */ 2847 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2848 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2849 } else { 2850 netdev_err(qdev->ndev, "Invalid mtu size: %d. 
Only %d and %d are accepted.\n", 2851 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2852 return -ENOMEM; 2853 } 2854 qdev->num_large_buffers = 2855 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2856 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2857 qdev->max_frame_size = 2858 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2859 2860 /* 2861 * First allocate a page of shared memory and use it for shadow 2862 * locations of Network Request Queue Consumer Address Register and 2863 * Network Completion Queue Producer Index Register 2864 */ 2865 qdev->shadow_reg_virt_addr = 2866 pci_alloc_consistent(qdev->pdev, 2867 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2868 2869 if (qdev->shadow_reg_virt_addr != NULL) { 2870 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2871 qdev->req_consumer_index_phy_addr_high = 2872 MS_64BITS(qdev->shadow_reg_phy_addr); 2873 qdev->req_consumer_index_phy_addr_low = 2874 LS_64BITS(qdev->shadow_reg_phy_addr); 2875 2876 qdev->prsp_producer_index = 2877 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2878 qdev->rsp_producer_index_phy_addr_high = 2879 qdev->req_consumer_index_phy_addr_high; 2880 qdev->rsp_producer_index_phy_addr_low = 2881 qdev->req_consumer_index_phy_addr_low + 8; 2882 } else { 2883 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2884 return -ENOMEM; 2885 } 2886 2887 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2888 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2889 goto err_req_rsp; 2890 } 2891 2892 if (ql_alloc_buffer_queues(qdev) != 0) { 2893 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2894 goto err_buffer_queues; 2895 } 2896 2897 if (ql_alloc_small_buffers(qdev) != 0) { 2898 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2899 goto err_small_buffers; 2900 } 2901 2902 if (ql_alloc_large_buffers(qdev) != 0) { 2903 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2904 goto err_small_buffers; 2905 } 2906 2907 /* Initialize the large buffer queue. 
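 * ql_init_large_buffers() below simply copies each pre-mapped receive
 * buffer's bus address into the large buffer queue elements and resets
 * the ring bookkeeping (lrg_buf_index, lrg_buf_skb_check).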
*/ 2908 ql_init_large_buffers(qdev); 2909 if (ql_create_send_free_list(qdev)) 2910 goto err_free_list; 2911 2912 qdev->rsp_current = qdev->rsp_q_virt_addr; 2913 2914 return 0; 2915 err_free_list: 2916 ql_free_send_free_list(qdev); 2917 err_small_buffers: 2918 ql_free_buffer_queues(qdev); 2919 err_buffer_queues: 2920 ql_free_net_req_rsp_queues(qdev); 2921 err_req_rsp: 2922 pci_free_consistent(qdev->pdev, 2923 PAGE_SIZE, 2924 qdev->shadow_reg_virt_addr, 2925 qdev->shadow_reg_phy_addr); 2926 2927 return -ENOMEM; 2928 } 2929 2930 static void ql_free_mem_resources(struct ql3_adapter *qdev) 2931 { 2932 ql_free_send_free_list(qdev); 2933 ql_free_large_buffers(qdev); 2934 ql_free_small_buffers(qdev); 2935 ql_free_buffer_queues(qdev); 2936 ql_free_net_req_rsp_queues(qdev); 2937 if (qdev->shadow_reg_virt_addr != NULL) { 2938 pci_free_consistent(qdev->pdev, 2939 PAGE_SIZE, 2940 qdev->shadow_reg_virt_addr, 2941 qdev->shadow_reg_phy_addr); 2942 qdev->shadow_reg_virt_addr = NULL; 2943 } 2944 } 2945 2946 static int ql_init_misc_registers(struct ql3_adapter *qdev) 2947 { 2948 struct ql3xxx_local_ram_registers __iomem *local_ram = 2949 (void __iomem *)qdev->mem_map_registers; 2950 2951 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2952 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2953 2) << 4)) 2954 return -1; 2955 2956 ql_write_page2_reg(qdev, 2957 &local_ram->bufletSize, qdev->nvram_data.bufletSize); 2958 2959 ql_write_page2_reg(qdev, 2960 &local_ram->maxBufletCount, 2961 qdev->nvram_data.bufletCount); 2962 2963 ql_write_page2_reg(qdev, 2964 &local_ram->freeBufletThresholdLow, 2965 (qdev->nvram_data.tcpWindowThreshold25 << 16) | 2966 (qdev->nvram_data.tcpWindowThreshold0)); 2967 2968 ql_write_page2_reg(qdev, 2969 &local_ram->freeBufletThresholdHigh, 2970 qdev->nvram_data.tcpWindowThreshold50); 2971 2972 ql_write_page2_reg(qdev, 2973 &local_ram->ipHashTableBase, 2974 (qdev->nvram_data.ipHashTableBaseHi << 16) | 2975 qdev->nvram_data.ipHashTableBaseLo); 2976 ql_write_page2_reg(qdev, 2977 &local_ram->ipHashTableCount, 2978 qdev->nvram_data.ipHashTableSize); 2979 ql_write_page2_reg(qdev, 2980 &local_ram->tcpHashTableBase, 2981 (qdev->nvram_data.tcpHashTableBaseHi << 16) | 2982 qdev->nvram_data.tcpHashTableBaseLo); 2983 ql_write_page2_reg(qdev, 2984 &local_ram->tcpHashTableCount, 2985 qdev->nvram_data.tcpHashTableSize); 2986 ql_write_page2_reg(qdev, 2987 &local_ram->ncbBase, 2988 (qdev->nvram_data.ncbTableBaseHi << 16) | 2989 qdev->nvram_data.ncbTableBaseLo); 2990 ql_write_page2_reg(qdev, 2991 &local_ram->maxNcbCount, 2992 qdev->nvram_data.ncbTableSize); 2993 ql_write_page2_reg(qdev, 2994 &local_ram->drbBase, 2995 (qdev->nvram_data.drbTableBaseHi << 16) | 2996 qdev->nvram_data.drbTableBaseLo); 2997 ql_write_page2_reg(qdev, 2998 &local_ram->maxDrbCount, 2999 qdev->nvram_data.drbTableSize); 3000 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); 3001 return 0; 3002 } 3003 3004 static int ql_adapter_initialize(struct ql3_adapter *qdev) 3005 { 3006 u32 value; 3007 struct ql3xxx_port_registers __iomem *port_regs = 3008 qdev->mem_map_registers; 3009 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3010 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3011 (void __iomem *)port_regs; 3012 u32 delay = 10; 3013 int status = 0; 3014 3015 if (ql_mii_setup(qdev)) 3016 return -1; 3017 3018 /* Bring out PHY out of reset */ 3019 ql_write_common_reg(qdev, spir, 3020 (ISP_SERIAL_PORT_IF_WE | 3021 (ISP_SERIAL_PORT_IF_WE << 16))); 3022 /* Give the PHY time to come out of reset. 
*/ 3023 mdelay(100); 3024 qdev->port_link_state = LS_DOWN; 3025 netif_carrier_off(qdev->ndev); 3026 3027 /* V2 chip fix for ARS-39168. */ 3028 ql_write_common_reg(qdev, spir, 3029 (ISP_SERIAL_PORT_IF_SDE | 3030 (ISP_SERIAL_PORT_IF_SDE << 16))); 3031 3032 /* Request Queue Registers */ 3033 *((u32 *)(qdev->preq_consumer_index)) = 0; 3034 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3035 qdev->req_producer_index = 0; 3036 3037 ql_write_page1_reg(qdev, 3038 &hmem_regs->reqConsumerIndexAddrHigh, 3039 qdev->req_consumer_index_phy_addr_high); 3040 ql_write_page1_reg(qdev, 3041 &hmem_regs->reqConsumerIndexAddrLow, 3042 qdev->req_consumer_index_phy_addr_low); 3043 3044 ql_write_page1_reg(qdev, 3045 &hmem_regs->reqBaseAddrHigh, 3046 MS_64BITS(qdev->req_q_phy_addr)); 3047 ql_write_page1_reg(qdev, 3048 &hmem_regs->reqBaseAddrLow, 3049 LS_64BITS(qdev->req_q_phy_addr)); 3050 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3051 3052 /* Response Queue Registers */ 3053 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3054 qdev->rsp_consumer_index = 0; 3055 qdev->rsp_current = qdev->rsp_q_virt_addr; 3056 3057 ql_write_page1_reg(qdev, 3058 &hmem_regs->rspProducerIndexAddrHigh, 3059 qdev->rsp_producer_index_phy_addr_high); 3060 3061 ql_write_page1_reg(qdev, 3062 &hmem_regs->rspProducerIndexAddrLow, 3063 qdev->rsp_producer_index_phy_addr_low); 3064 3065 ql_write_page1_reg(qdev, 3066 &hmem_regs->rspBaseAddrHigh, 3067 MS_64BITS(qdev->rsp_q_phy_addr)); 3068 3069 ql_write_page1_reg(qdev, 3070 &hmem_regs->rspBaseAddrLow, 3071 LS_64BITS(qdev->rsp_q_phy_addr)); 3072 3073 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3074 3075 /* Large Buffer Queue */ 3076 ql_write_page1_reg(qdev, 3077 &hmem_regs->rxLargeQBaseAddrHigh, 3078 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3079 3080 ql_write_page1_reg(qdev, 3081 &hmem_regs->rxLargeQBaseAddrLow, 3082 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3083 3084 ql_write_page1_reg(qdev, 3085 &hmem_regs->rxLargeQLength, 3086 qdev->num_lbufq_entries); 3087 3088 ql_write_page1_reg(qdev, 3089 &hmem_regs->rxLargeBufferLength, 3090 qdev->lrg_buffer_len); 3091 3092 /* Small Buffer Queue */ 3093 ql_write_page1_reg(qdev, 3094 &hmem_regs->rxSmallQBaseAddrHigh, 3095 MS_64BITS(qdev->small_buf_q_phy_addr)); 3096 3097 ql_write_page1_reg(qdev, 3098 &hmem_regs->rxSmallQBaseAddrLow, 3099 LS_64BITS(qdev->small_buf_q_phy_addr)); 3100 3101 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3102 ql_write_page1_reg(qdev, 3103 &hmem_regs->rxSmallBufferLength, 3104 QL_SMALL_BUFFER_SIZE); 3105 3106 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3107 qdev->small_buf_release_cnt = 8; 3108 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3109 qdev->lrg_buf_release_cnt = 8; 3110 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3111 qdev->small_buf_index = 0; 3112 qdev->lrg_buf_index = 0; 3113 qdev->lrg_buf_free_count = 0; 3114 qdev->lrg_buf_free_head = NULL; 3115 qdev->lrg_buf_free_tail = NULL; 3116 3117 ql_write_common_reg(qdev, 3118 &port_regs->CommonRegs. 3119 rxSmallQProducerIndex, 3120 qdev->small_buf_q_producer_index); 3121 ql_write_common_reg(qdev, 3122 &port_regs->CommonRegs. 3123 rxLargeQProducerIndex, 3124 qdev->lrg_buf_q_producer_index); 3125 3126 /* 3127 * Find out if the chip has already been initialized. If it has, then 3128 * we skip some of the initialization. 
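 * The decision is made on the PORT_STATUS_IC bit read just below: when
 * the chip is already configured, the local-RAM setup and the
 * flash-semaphore-guarded ExternalHWConfig/InternalChipConfig writes
 * are skipped and only the per-port configuration is redone.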
3129 */ 3130 clear_bit(QL_LINK_MASTER, &qdev->flags); 3131 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3132 if ((value & PORT_STATUS_IC) == 0) { 3133 3134 /* Chip has not been configured yet, so let it rip. */ 3135 if (ql_init_misc_registers(qdev)) { 3136 status = -1; 3137 goto out; 3138 } 3139 3140 value = qdev->nvram_data.tcpMaxWindowSize; 3141 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3142 3143 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3144 3145 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3146 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3147 * 2) << 13)) { 3148 status = -1; 3149 goto out; 3150 } 3151 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3152 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3153 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3154 16) | (INTERNAL_CHIP_SD | 3155 INTERNAL_CHIP_WE))); 3156 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3157 } 3158 3159 if (qdev->mac_index) 3160 ql_write_page0_reg(qdev, 3161 &port_regs->mac1MaxFrameLengthReg, 3162 qdev->max_frame_size); 3163 else 3164 ql_write_page0_reg(qdev, 3165 &port_regs->mac0MaxFrameLengthReg, 3166 qdev->max_frame_size); 3167 3168 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3169 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3170 2) << 7)) { 3171 status = -1; 3172 goto out; 3173 } 3174 3175 PHY_Setup(qdev); 3176 ql_init_scan_mode(qdev); 3177 ql_get_phy_owner(qdev); 3178 3179 /* Load the MAC Configuration */ 3180 3181 /* Program lower 32 bits of the MAC address */ 3182 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3183 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3184 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3185 ((qdev->ndev->dev_addr[2] << 24) 3186 | (qdev->ndev->dev_addr[3] << 16) 3187 | (qdev->ndev->dev_addr[4] << 8) 3188 | qdev->ndev->dev_addr[5])); 3189 3190 /* Program top 16 bits of the MAC address */ 3191 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3192 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3193 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3194 ((qdev->ndev->dev_addr[0] << 8) 3195 | qdev->ndev->dev_addr[1])); 3196 3197 /* Enable Primary MAC */ 3198 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3199 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3200 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3201 3202 /* Clear Primary and Secondary IP addresses */ 3203 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3204 ((IP_ADDR_INDEX_REG_MASK << 16) | 3205 (qdev->mac_index << 2))); 3206 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3207 3208 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3209 ((IP_ADDR_INDEX_REG_MASK << 16) | 3210 ((qdev->mac_index << 2) + 1))); 3211 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3212 3213 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3214 3215 /* Indicate Configuration Complete */ 3216 ql_write_page0_reg(qdev, 3217 &port_regs->portControl, 3218 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3219 3220 do { 3221 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3222 if (value & PORT_STATUS_IC) 3223 break; 3224 spin_unlock_irq(&qdev->hw_lock); 3225 msleep(500); 3226 spin_lock_irq(&qdev->hw_lock); 3227 } while (--delay); 3228 3229 if (delay == 0) { 3230 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3231 status = -1; 3232 goto out; 3233 } 3234 3235 /* Enable Ethernet Function */ 3236 if (qdev->device_id == QL3032_DEVICE_ID) { 3237 value = 3238 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 3239 QL3032_PORT_CONTROL_EIv6 | 
QL3032_PORT_CONTROL_EIv4 | 3240 QL3032_PORT_CONTROL_ET); 3241 ql_write_page0_reg(qdev, &port_regs->functionControl, 3242 ((value << 16) | value)); 3243 } else { 3244 value = 3245 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 3246 PORT_CONTROL_HH); 3247 ql_write_page0_reg(qdev, &port_regs->portControl, 3248 ((value << 16) | value)); 3249 } 3250 3251 3252 out: 3253 return status; 3254 } 3255 3256 /* 3257 * Caller holds hw_lock. 3258 */ 3259 static int ql_adapter_reset(struct ql3_adapter *qdev) 3260 { 3261 struct ql3xxx_port_registers __iomem *port_regs = 3262 qdev->mem_map_registers; 3263 int status = 0; 3264 u16 value; 3265 int max_wait_time; 3266 3267 set_bit(QL_RESET_ACTIVE, &qdev->flags); 3268 clear_bit(QL_RESET_DONE, &qdev->flags); 3269 3270 /* 3271 * Issue soft reset to chip. 3272 */ 3273 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); 3274 ql_write_common_reg(qdev, 3275 &port_regs->CommonRegs.ispControlStatus, 3276 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3277 3278 /* Wait up to 5 seconds for the reset to complete. */ 3279 netdev_printk(KERN_DEBUG, qdev->ndev, 3280 "Wait up to 5 seconds for reset to complete\n"); 3281 3282 /* Wait until the firmware tells us the Soft Reset is done */ 3283 max_wait_time = 5; 3284 do { 3285 value = 3286 ql_read_common_reg(qdev, 3287 &port_regs->CommonRegs.ispControlStatus); 3288 if ((value & ISP_CONTROL_SR) == 0) 3289 break; 3290 3291 ssleep(1); 3292 } while ((--max_wait_time)); 3293 3294 /* 3295 * Also, make sure that the Network Reset Interrupt bit has been 3296 * cleared after the soft reset has taken place. 3297 */ 3298 value = 3299 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3300 if (value & ISP_CONTROL_RI) { 3301 netdev_printk(KERN_DEBUG, qdev->ndev, 3302 "clearing RI after reset\n"); 3303 ql_write_common_reg(qdev, 3304 &port_regs->CommonRegs. 3305 ispControlStatus, 3306 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3307 } 3308 3309 if (max_wait_time == 0) { 3310 /* Issue Force Soft Reset */ 3311 ql_write_common_reg(qdev, 3312 &port_regs->CommonRegs. 3313 ispControlStatus, 3314 ((ISP_CONTROL_FSR << 16) | 3315 ISP_CONTROL_FSR)); 3316 /* 3317 * Wait until the firmware tells us the Force Soft Reset is 3318 * done 3319 */ 3320 max_wait_time = 5; 3321 do { 3322 value = ql_read_common_reg(qdev, 3323 &port_regs->CommonRegs. 
3324 ispControlStatus); 3325 if ((value & ISP_CONTROL_FSR) == 0) 3326 break; 3327 ssleep(1); 3328 } while ((--max_wait_time)); 3329 } 3330 if (max_wait_time == 0) 3331 status = 1; 3332 3333 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3334 set_bit(QL_RESET_DONE, &qdev->flags); 3335 return status; 3336 } 3337 3338 static void ql_set_mac_info(struct ql3_adapter *qdev) 3339 { 3340 struct ql3xxx_port_registers __iomem *port_regs = 3341 qdev->mem_map_registers; 3342 u32 value, port_status; 3343 u8 func_number; 3344 3345 /* Get the function number */ 3346 value = 3347 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3348 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3349 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3350 switch (value & ISP_CONTROL_FN_MASK) { 3351 case ISP_CONTROL_FN0_NET: 3352 qdev->mac_index = 0; 3353 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3354 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3355 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3356 if (port_status & PORT_STATUS_SM0) 3357 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3358 else 3359 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3360 break; 3361 3362 case ISP_CONTROL_FN1_NET: 3363 qdev->mac_index = 1; 3364 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3365 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3366 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3367 if (port_status & PORT_STATUS_SM1) 3368 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3369 else 3370 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3371 break; 3372 3373 case ISP_CONTROL_FN0_SCSI: 3374 case ISP_CONTROL_FN1_SCSI: 3375 default: 3376 netdev_printk(KERN_DEBUG, qdev->ndev, 3377 "Invalid function number, ispControlStatus = 0x%x\n", 3378 value); 3379 break; 3380 } 3381 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3382 } 3383 3384 static void ql_display_dev_info(struct net_device *ndev) 3385 { 3386 struct ql3_adapter *qdev = netdev_priv(ndev); 3387 struct pci_dev *pdev = qdev->pdev; 3388 3389 netdev_info(ndev, 3390 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3391 DRV_NAME, qdev->index, qdev->chip_rev_id, 3392 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3393 qdev->pci_slot); 3394 netdev_info(ndev, "%s Interface\n", 3395 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3396 3397 /* 3398 * Print PCI bus width/type. 3399 */ 3400 netdev_info(ndev, "Bus interface is %s %s\n", 3401 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3402 ((qdev->pci_x) ? 
"PCI-X" : "PCI")); 3403 3404 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3405 qdev->mem_map_registers); 3406 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3407 3408 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3409 } 3410 3411 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3412 { 3413 struct net_device *ndev = qdev->ndev; 3414 int retval = 0; 3415 3416 netif_stop_queue(ndev); 3417 netif_carrier_off(ndev); 3418 3419 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3420 clear_bit(QL_LINK_MASTER, &qdev->flags); 3421 3422 ql_disable_interrupts(qdev); 3423 3424 free_irq(qdev->pdev->irq, ndev); 3425 3426 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3427 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3428 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3429 pci_disable_msi(qdev->pdev); 3430 } 3431 3432 del_timer_sync(&qdev->adapter_timer); 3433 3434 napi_disable(&qdev->napi); 3435 3436 if (do_reset) { 3437 int soft_reset; 3438 unsigned long hw_flags; 3439 3440 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3441 if (ql_wait_for_drvr_lock(qdev)) { 3442 soft_reset = ql_adapter_reset(qdev); 3443 if (soft_reset) { 3444 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3445 qdev->index); 3446 } 3447 netdev_err(ndev, 3448 "Releasing driver lock via chip reset\n"); 3449 } else { 3450 netdev_err(ndev, 3451 "Could not acquire driver lock to do reset!\n"); 3452 retval = -1; 3453 } 3454 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3455 } 3456 ql_free_mem_resources(qdev); 3457 return retval; 3458 } 3459 3460 static int ql_adapter_up(struct ql3_adapter *qdev) 3461 { 3462 struct net_device *ndev = qdev->ndev; 3463 int err; 3464 unsigned long irq_flags = IRQF_SHARED; 3465 unsigned long hw_flags; 3466 3467 if (ql_alloc_mem_resources(qdev)) { 3468 netdev_err(ndev, "Unable to allocate buffers\n"); 3469 return -ENOMEM; 3470 } 3471 3472 if (qdev->msi) { 3473 if (pci_enable_msi(qdev->pdev)) { 3474 netdev_err(ndev, 3475 "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); 3476 qdev->msi = 0; 3477 } else { 3478 netdev_info(ndev, "MSI Enabled...\n"); 3479 set_bit(QL_MSI_ENABLED, &qdev->flags); 3480 irq_flags &= ~IRQF_SHARED; 3481 } 3482 } 3483 3484 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3485 irq_flags, ndev->name, ndev); 3486 if (err) { 3487 netdev_err(ndev, 3488 "Failed to reserve interrupt %d - already in use\n", 3489 qdev->pdev->irq); 3490 goto err_irq; 3491 } 3492 3493 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3494 3495 err = ql_wait_for_drvr_lock(qdev); 3496 if (err) { 3497 err = ql_adapter_initialize(qdev); 3498 if (err) { 3499 netdev_err(ndev, "Unable to initialize adapter\n"); 3500 goto err_init; 3501 } 3502 netdev_err(ndev, "Releasing driver lock\n"); 3503 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3504 } else { 3505 netdev_err(ndev, "Could not acquire driver lock\n"); 3506 goto err_lock; 3507 } 3508 3509 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3510 3511 set_bit(QL_ADAPTER_UP, &qdev->flags); 3512 3513 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3514 3515 napi_enable(&qdev->napi); 3516 ql_enable_interrupts(qdev); 3517 return 0; 3518 3519 err_init: 3520 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3521 err_lock: 3522 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3523 free_irq(qdev->pdev->irq, ndev); 3524 err_irq: 3525 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3526 netdev_info(ndev, "calling pci_disable_msi()\n"); 3527 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3528 pci_disable_msi(qdev->pdev); 3529 } 3530 return err; 3531 } 3532 3533 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3534 { 3535 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3536 netdev_err(qdev->ndev, 3537 "Driver up/down cycle failed, closing device\n"); 3538 rtnl_lock(); 3539 dev_close(qdev->ndev); 3540 rtnl_unlock(); 3541 return -1; 3542 } 3543 return 0; 3544 } 3545 3546 static int ql3xxx_close(struct net_device *ndev) 3547 { 3548 struct ql3_adapter *qdev = netdev_priv(ndev); 3549 3550 /* 3551 * Wait for device to recover from a reset. 3552 * (Rarely happens, but possible.) 
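 * The loop below simply polls QL_ADAPTER_UP every 50 ms so that close
 * does not start tearing the interface down while a reset cycle is
 * still bringing it back up.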
3553 */ 3554 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3555 msleep(50); 3556 3557 ql_adapter_down(qdev, QL_DO_RESET); 3558 return 0; 3559 } 3560 3561 static int ql3xxx_open(struct net_device *ndev) 3562 { 3563 struct ql3_adapter *qdev = netdev_priv(ndev); 3564 return ql_adapter_up(qdev); 3565 } 3566 3567 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3568 { 3569 struct ql3_adapter *qdev = netdev_priv(ndev); 3570 struct ql3xxx_port_registers __iomem *port_regs = 3571 qdev->mem_map_registers; 3572 struct sockaddr *addr = p; 3573 unsigned long hw_flags; 3574 3575 if (netif_running(ndev)) 3576 return -EBUSY; 3577 3578 if (!is_valid_ether_addr(addr->sa_data)) 3579 return -EADDRNOTAVAIL; 3580 3581 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3582 3583 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3584 /* Program lower 32 bits of the MAC address */ 3585 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3586 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3587 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3588 ((ndev->dev_addr[2] << 24) | (ndev-> 3589 dev_addr[3] << 16) | 3590 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3591 3592 /* Program top 16 bits of the MAC address */ 3593 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3594 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3595 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3596 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3597 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3598 3599 return 0; 3600 } 3601 3602 static void ql3xxx_tx_timeout(struct net_device *ndev) 3603 { 3604 struct ql3_adapter *qdev = netdev_priv(ndev); 3605 3606 netdev_err(ndev, "Resetting...\n"); 3607 /* 3608 * Stop the queues, we've got a problem. 3609 */ 3610 netif_stop_queue(ndev); 3611 3612 /* 3613 * Wake up the worker to process this event. 3614 */ 3615 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3616 } 3617 3618 static void ql_reset_work(struct work_struct *work) 3619 { 3620 struct ql3_adapter *qdev = 3621 container_of(work, struct ql3_adapter, reset_work.work); 3622 struct net_device *ndev = qdev->ndev; 3623 u32 value; 3624 struct ql_tx_buf_cb *tx_cb; 3625 int max_wait_time, i; 3626 struct ql3xxx_port_registers __iomem *port_regs = 3627 qdev->mem_map_registers; 3628 unsigned long hw_flags; 3629 3630 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3631 clear_bit(QL_LINK_MASTER, &qdev->flags); 3632 3633 /* 3634 * Loop through the active list and return the skb. 3635 */ 3636 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3637 int j; 3638 tx_cb = &qdev->tx_buf[i]; 3639 if (tx_cb->skb) { 3640 netdev_printk(KERN_DEBUG, ndev, 3641 "Freeing lost SKB\n"); 3642 pci_unmap_single(qdev->pdev, 3643 dma_unmap_addr(&tx_cb->map[0], 3644 mapaddr), 3645 dma_unmap_len(&tx_cb->map[0], maplen), 3646 PCI_DMA_TODEVICE); 3647 for (j = 1; j < tx_cb->seg_count; j++) { 3648 pci_unmap_page(qdev->pdev, 3649 dma_unmap_addr(&tx_cb->map[j], 3650 mapaddr), 3651 dma_unmap_len(&tx_cb->map[j], 3652 maplen), 3653 PCI_DMA_TODEVICE); 3654 } 3655 dev_kfree_skb(tx_cb->skb); 3656 tx_cb->skb = NULL; 3657 } 3658 } 3659 3660 netdev_err(ndev, "Clearing NRI after reset\n"); 3661 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3662 ql_write_common_reg(qdev, 3663 &port_regs->CommonRegs. 3664 ispControlStatus, 3665 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3666 /* 3667 * Wait the for Soft Reset to Complete. 
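 * The loop below polls ispControlStatus about once a second for up to
 * ten tries, dropping hw_lock around each sleep, and re-clears the
 * network reset interrupt bit if it appears again while waiting.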
3668 */ 3669 max_wait_time = 10; 3670 do { 3671 value = ql_read_common_reg(qdev, 3672 &port_regs->CommonRegs. 3673 3674 ispControlStatus); 3675 if ((value & ISP_CONTROL_SR) == 0) { 3676 netdev_printk(KERN_DEBUG, ndev, 3677 "reset completed\n"); 3678 break; 3679 } 3680 3681 if (value & ISP_CONTROL_RI) { 3682 netdev_printk(KERN_DEBUG, ndev, 3683 "clearing NRI after reset\n"); 3684 ql_write_common_reg(qdev, 3685 &port_regs-> 3686 CommonRegs. 3687 ispControlStatus, 3688 ((ISP_CONTROL_RI << 3689 16) | ISP_CONTROL_RI)); 3690 } 3691 3692 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3693 ssleep(1); 3694 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3695 } while (--max_wait_time); 3696 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3697 3698 if (value & ISP_CONTROL_SR) { 3699 3700 /* 3701 * Set the reset flags and clear the board again. 3702 * Nothing else to do... 3703 */ 3704 netdev_err(ndev, 3705 "Timed out waiting for reset to complete\n"); 3706 netdev_err(ndev, "Do a reset\n"); 3707 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3708 clear_bit(QL_RESET_START, &qdev->flags); 3709 ql_cycle_adapter(qdev, QL_DO_RESET); 3710 return; 3711 } 3712 3713 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3714 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3715 clear_bit(QL_RESET_START, &qdev->flags); 3716 ql_cycle_adapter(qdev, QL_NO_RESET); 3717 } 3718 } 3719 3720 static void ql_tx_timeout_work(struct work_struct *work) 3721 { 3722 struct ql3_adapter *qdev = 3723 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3724 3725 ql_cycle_adapter(qdev, QL_DO_RESET); 3726 } 3727 3728 static void ql_get_board_info(struct ql3_adapter *qdev) 3729 { 3730 struct ql3xxx_port_registers __iomem *port_regs = 3731 qdev->mem_map_registers; 3732 u32 value; 3733 3734 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3735 3736 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3737 if (value & PORT_STATUS_64) 3738 qdev->pci_width = 64; 3739 else 3740 qdev->pci_width = 32; 3741 if (value & PORT_STATUS_X) 3742 qdev->pci_x = 1; 3743 else 3744 qdev->pci_x = 0; 3745 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3746 } 3747 3748 static void ql3xxx_timer(unsigned long ptr) 3749 { 3750 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; 3751 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3752 } 3753 3754 static const struct net_device_ops ql3xxx_netdev_ops = { 3755 .ndo_open = ql3xxx_open, 3756 .ndo_start_xmit = ql3xxx_send, 3757 .ndo_stop = ql3xxx_close, 3758 .ndo_change_mtu = eth_change_mtu, 3759 .ndo_validate_addr = eth_validate_addr, 3760 .ndo_set_mac_address = ql3xxx_set_mac_address, 3761 .ndo_tx_timeout = ql3xxx_tx_timeout, 3762 }; 3763 3764 static int ql3xxx_probe(struct pci_dev *pdev, 3765 const struct pci_device_id *pci_entry) 3766 { 3767 struct net_device *ndev = NULL; 3768 struct ql3_adapter *qdev = NULL; 3769 static int cards_found; 3770 int uninitialized_var(pci_using_dac), err; 3771 3772 err = pci_enable_device(pdev); 3773 if (err) { 3774 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3775 goto err_out; 3776 } 3777 3778 err = pci_request_regions(pdev, DRV_NAME); 3779 if (err) { 3780 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3781 goto err_out_disable_pdev; 3782 } 3783 3784 pci_set_master(pdev); 3785 3786 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3787 pci_using_dac = 1; 3788 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3789 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3790 pci_using_dac = 0; 3791 err = 
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3792 } 3793 3794 if (err) { 3795 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3796 goto err_out_free_regions; 3797 } 3798 3799 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3800 if (!ndev) { 3801 err = -ENOMEM; 3802 goto err_out_free_regions; 3803 } 3804 3805 SET_NETDEV_DEV(ndev, &pdev->dev); 3806 3807 pci_set_drvdata(pdev, ndev); 3808 3809 qdev = netdev_priv(ndev); 3810 qdev->index = cards_found; 3811 qdev->ndev = ndev; 3812 qdev->pdev = pdev; 3813 qdev->device_id = pci_entry->device; 3814 qdev->port_link_state = LS_DOWN; 3815 if (msi) 3816 qdev->msi = 1; 3817 3818 qdev->msg_enable = netif_msg_init(debug, default_msg); 3819 3820 if (pci_using_dac) 3821 ndev->features |= NETIF_F_HIGHDMA; 3822 if (qdev->device_id == QL3032_DEVICE_ID) 3823 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3824 3825 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3826 if (!qdev->mem_map_registers) { 3827 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3828 err = -EIO; 3829 goto err_out_free_ndev; 3830 } 3831 3832 spin_lock_init(&qdev->adapter_lock); 3833 spin_lock_init(&qdev->hw_lock); 3834 3835 /* Set driver entry points */ 3836 ndev->netdev_ops = &ql3xxx_netdev_ops; 3837 ndev->ethtool_ops = &ql3xxx_ethtool_ops; 3838 ndev->watchdog_timeo = 5 * HZ; 3839 3840 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3841 3842 ndev->irq = pdev->irq; 3843 3844 /* make sure the EEPROM is good */ 3845 if (ql_get_nvram_params(qdev)) { 3846 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3847 __func__, qdev->index); 3848 err = -EIO; 3849 goto err_out_iounmap; 3850 } 3851 3852 ql_set_mac_info(qdev); 3853 3854 /* Validate and set parameters */ 3855 if (qdev->mac_index) { 3856 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3857 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3858 } else { 3859 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3860 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3861 } 3862 3863 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3864 3865 /* Record PCI bus information. */ 3866 ql_get_board_info(qdev); 3867 3868 /* 3869 * Set the Maximum Memory Read Byte Count value. We do this to handle 3870 * jumbo frames. 
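 * Only PCI-X parts get this; the config-space write at offset 0x4e
 * below (presumably the PCI-X command register on these adapters)
 * raises the maximum memory read byte count so jumbo-frame sized DMA
 * reads are allowed.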
3871 */ 3872 if (qdev->pci_x) 3873 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3874 3875 err = register_netdev(ndev); 3876 if (err) { 3877 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3878 goto err_out_iounmap; 3879 } 3880 3881 /* we're going to reset, so assume we have no link for now */ 3882 3883 netif_carrier_off(ndev); 3884 netif_stop_queue(ndev); 3885 3886 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3887 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3888 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3889 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3890 3891 init_timer(&qdev->adapter_timer); 3892 qdev->adapter_timer.function = ql3xxx_timer; 3893 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3894 qdev->adapter_timer.data = (unsigned long)qdev; 3895 3896 if (!cards_found) { 3897 pr_alert("%s\n", DRV_STRING); 3898 pr_alert("Driver name: %s, Version: %s\n", 3899 DRV_NAME, DRV_VERSION); 3900 } 3901 ql_display_dev_info(ndev); 3902 3903 cards_found++; 3904 return 0; 3905 3906 err_out_iounmap: 3907 iounmap(qdev->mem_map_registers); 3908 err_out_free_ndev: 3909 free_netdev(ndev); 3910 err_out_free_regions: 3911 pci_release_regions(pdev); 3912 err_out_disable_pdev: 3913 pci_disable_device(pdev); 3914 err_out: 3915 return err; 3916 } 3917 3918 static void ql3xxx_remove(struct pci_dev *pdev) 3919 { 3920 struct net_device *ndev = pci_get_drvdata(pdev); 3921 struct ql3_adapter *qdev = netdev_priv(ndev); 3922 3923 unregister_netdev(ndev); 3924 3925 ql_disable_interrupts(qdev); 3926 3927 if (qdev->workqueue) { 3928 cancel_delayed_work(&qdev->reset_work); 3929 cancel_delayed_work(&qdev->tx_timeout_work); 3930 destroy_workqueue(qdev->workqueue); 3931 qdev->workqueue = NULL; 3932 } 3933 3934 iounmap(qdev->mem_map_registers); 3935 pci_release_regions(pdev); 3936 free_netdev(ndev); 3937 } 3938 3939 static struct pci_driver ql3xxx_driver = { 3940 3941 .name = DRV_NAME, 3942 .id_table = ql3xxx_pci_tbl, 3943 .probe = ql3xxx_probe, 3944 .remove = ql3xxx_remove, 3945 }; 3946 3947 module_pci_driver(ql3xxx_driver); 3948