/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE	phyDevice;
	const u32			phyIdOUI;
	const u16			phyIdModel;
	const char			*name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};


/*
 * Caller must take hw_lock.
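 *
 * A note on the semaphore register access pattern used below (inferred
 * from ql_sem_spinlock()/ql_sem_lock(), not from a hardware manual): the
 * value written combines sem_mask, whose upper 16 bits select the
 * semaphore field being addressed, with sem_bits, the ownership code to
 * latch into that field.  The caller knows it won the semaphore when the
 * read-back value, masked with (sem_mask >> 16), equals sem_bits.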
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}
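
/*
 * The writel()/readl() pairs in the accessors above and below follow the
 * usual PCI practice of reading a register back to flush posted writes
 * before the caller proceeds.  The page0/1/2 accessors additionally rely
 * on qdev->current_page, which is only consistent while hw_lock is held.
 * A minimal, hypothetical usage sketch ("some_page2_reg" stands in for
 * any page-2 register; it is not a real field name):
 *
 *	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 *	ql_write_page2_reg(qdev, &some_page2_reg, value);
 *	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 */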

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
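 *
 * The fm93c56a_*() helpers below bit-bang the FM93C56A serial EEPROM
 * through the serial port interface register: chip select, clock and
 * data-out are driven one bit at a time and data-in is sampled after
 * each clock.  (Descriptive summary of the code that follows, assuming
 * the usual microwire-style protocol for this part.)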
374 */ 375 static void fm93c56a_select(struct ql3_adapter *qdev) 376 { 377 struct ql3xxx_port_registers __iomem *port_regs = 378 qdev->mem_map_registers; 379 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 380 381 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 382 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 383 } 384 385 /* 386 * Caller holds hw_lock. 387 */ 388 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) 389 { 390 int i; 391 u32 mask; 392 u32 dataBit; 393 u32 previousBit; 394 struct ql3xxx_port_registers __iomem *port_regs = 395 qdev->mem_map_registers; 396 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 397 398 /* Clock in a zero, then do the start bit */ 399 ql_write_nvram_reg(qdev, spir, 400 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 401 AUBURN_EEPROM_DO_1)); 402 ql_write_nvram_reg(qdev, spir, 403 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 404 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); 405 ql_write_nvram_reg(qdev, spir, 406 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 407 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); 408 409 mask = 1 << (FM93C56A_CMD_BITS - 1); 410 /* Force the previous data bit to be different */ 411 previousBit = 0xffff; 412 for (i = 0; i < FM93C56A_CMD_BITS; i++) { 413 dataBit = (cmd & mask) 414 ? AUBURN_EEPROM_DO_1 415 : AUBURN_EEPROM_DO_0; 416 if (previousBit != dataBit) { 417 /* If the bit changed, change the DO state to match */ 418 ql_write_nvram_reg(qdev, spir, 419 (ISP_NVRAM_MASK | 420 qdev->eeprom_cmd_data | dataBit)); 421 previousBit = dataBit; 422 } 423 ql_write_nvram_reg(qdev, spir, 424 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 425 dataBit | AUBURN_EEPROM_CLK_RISE)); 426 ql_write_nvram_reg(qdev, spir, 427 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 428 dataBit | AUBURN_EEPROM_CLK_FALL)); 429 cmd = cmd << 1; 430 } 431 432 mask = 1 << (addrBits - 1); 433 /* Force the previous data bit to be different */ 434 previousBit = 0xffff; 435 for (i = 0; i < addrBits; i++) { 436 dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 437 : AUBURN_EEPROM_DO_0; 438 if (previousBit != dataBit) { 439 /* 440 * If the bit changed, then change the DO state to 441 * match 442 */ 443 ql_write_nvram_reg(qdev, spir, 444 (ISP_NVRAM_MASK | 445 qdev->eeprom_cmd_data | dataBit)); 446 previousBit = dataBit; 447 } 448 ql_write_nvram_reg(qdev, spir, 449 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 450 dataBit | AUBURN_EEPROM_CLK_RISE)); 451 ql_write_nvram_reg(qdev, spir, 452 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 453 dataBit | AUBURN_EEPROM_CLK_FALL)); 454 eepromAddr = eepromAddr << 1; 455 } 456 } 457 458 /* 459 * Caller holds hw_lock. 460 */ 461 static void fm93c56a_deselect(struct ql3_adapter *qdev) 462 { 463 struct ql3xxx_port_registers __iomem *port_regs = 464 qdev->mem_map_registers; 465 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 466 467 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 468 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 469 } 470 471 /* 472 * Caller holds hw_lock. 473 */ 474 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) 475 { 476 int i; 477 u32 data = 0; 478 u32 dataBit; 479 struct ql3xxx_port_registers __iomem *port_regs = 480 qdev->mem_map_registers; 481 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 482 483 /* Read the data bits */ 484 /* The first bit is a dummy. Clock right over it. 
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev,
			      &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);

}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev,
			    0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the IDs */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
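 *
 * Like the other ql_mac_cfg_*() helpers above, this writes the relevant
 * MAC config bits together with the same bits shifted into the upper 16
 * bits, which act as a write-enable mask (as the enable/disable pairs
 * above suggest): "bits | (bits << 16)" turns the feature on, while
 * "(bits << 16)" alone turns it off without disturbing the other config
 * bits.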
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;

	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration &
	    PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 2 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 2 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_link_ksettings(struct net_device *ndev,
				 struct ethtool_link_ksettings *cmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		cmd->base.port = PORT_FIBRE;
	} else {
		cmd->base.port = PORT_TP;
		cmd->base.phy_address = qdev->PHYAddr;
	}
	advertising = ql_supported_modes(qdev);
	cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
	cmd->base.speed = ql_get_speed(qdev);
	cmd->base.duplex = ql_get_full_dup(qdev);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
	.get_link_ksettings = ql_get_link_ksettings,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}


				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
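 *
 * Each step of the small-buffer queue producer index appears to cover
 * eight buffers (note the "release_cnt -= 8" per increment below), so,
 * for example, a release count of 24 advances the producer index twice
 * and leaves 8 releases pending for the next pass.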
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel_relaxed(qdev->small_buf_q_producer_index,
			       &port_regs->CommonRegs.rxSmallQProducerIndex);
		mmiowb();
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
					ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
					lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
					lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

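/*
 * ql_get_sbuf()/ql_get_lbuf() below walk the small and large receive
 * buffer rings on the completion path.  The release counts they bump are
 * what ql_update_small_bufq_prod_index() and
 * ql_update_lrg_bufq_prod_index() later turn into producer index writes
 * for the hardware.
 */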
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	napi_gro_receive(&qdev->napi, skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */

	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
*/ 2072 pci_unmap_single(qdev->pdev, 2073 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2074 dma_unmap_len(lrg_buf_cb2, maplen), 2075 PCI_DMA_FROMDEVICE); 2076 prefetch(skb2->data); 2077 2078 skb_checksum_none_assert(skb2); 2079 if (qdev->device_id == QL3022_DEVICE_ID) { 2080 /* 2081 * Copy the ethhdr from first buffer to second. This 2082 * is necessary for 3022 IP completions. 2083 */ 2084 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2085 skb_push(skb2, size), size); 2086 } else { 2087 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2088 if (checksum & 2089 (IB_IP_IOCB_RSP_3032_ICE | 2090 IB_IP_IOCB_RSP_3032_CE)) { 2091 netdev_err(ndev, 2092 "%s: Bad checksum for this %s packet, checksum = %x\n", 2093 __func__, 2094 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2095 "TCP" : "UDP"), checksum); 2096 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2097 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2098 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2099 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2100 } 2101 } 2102 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2103 2104 napi_gro_receive(&qdev->napi, skb2); 2105 ndev->stats.rx_packets++; 2106 ndev->stats.rx_bytes += length; 2107 lrg_buf_cb2->skb = NULL; 2108 2109 if (qdev->device_id == QL3022_DEVICE_ID) 2110 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2111 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2112 } 2113 2114 static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) 2115 { 2116 struct net_rsp_iocb *net_rsp; 2117 struct net_device *ndev = qdev->ndev; 2118 int work_done = 0; 2119 2120 /* While there are entries in the completion queue. */ 2121 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2122 qdev->rsp_consumer_index) && (work_done < budget)) { 2123 2124 net_rsp = qdev->rsp_current; 2125 rmb(); 2126 /* 2127 * Fix the 3032 chip's undocumented "feature" where bit-8 is set 2128 * if the inbound completion is for a VLAN. 
2129 */ 2130 if (qdev->device_id == QL3032_DEVICE_ID) 2131 net_rsp->opcode &= 0x7f; 2132 switch (net_rsp->opcode) { 2133 2134 case OPCODE_OB_MAC_IOCB_FN0: 2135 case OPCODE_OB_MAC_IOCB_FN2: 2136 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) 2137 net_rsp); 2138 break; 2139 2140 case OPCODE_IB_MAC_IOCB: 2141 case OPCODE_IB_3032_MAC_IOCB: 2142 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 2143 net_rsp); 2144 work_done++; 2145 break; 2146 2147 case OPCODE_IB_IP_IOCB: 2148 case OPCODE_IB_3032_IP_IOCB: 2149 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 2150 net_rsp); 2151 work_done++; 2152 break; 2153 default: { 2154 u32 *tmp = (u32 *)net_rsp; 2155 netdev_err(ndev, 2156 "Hit default case, not handled!\n" 2157 " dropping the packet, opcode = %x\n" 2158 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2159 net_rsp->opcode, 2160 (unsigned long int)tmp[0], 2161 (unsigned long int)tmp[1], 2162 (unsigned long int)tmp[2], 2163 (unsigned long int)tmp[3]); 2164 } 2165 } 2166 2167 qdev->rsp_consumer_index++; 2168 2169 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { 2170 qdev->rsp_consumer_index = 0; 2171 qdev->rsp_current = qdev->rsp_q_virt_addr; 2172 } else { 2173 qdev->rsp_current++; 2174 } 2175 2176 } 2177 2178 return work_done; 2179 } 2180 2181 static int ql_poll(struct napi_struct *napi, int budget) 2182 { 2183 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2184 struct ql3xxx_port_registers __iomem *port_regs = 2185 qdev->mem_map_registers; 2186 int work_done; 2187 2188 work_done = ql_tx_rx_clean(qdev, budget); 2189 2190 if (work_done < budget && napi_complete_done(napi, work_done)) { 2191 unsigned long flags; 2192 2193 spin_lock_irqsave(&qdev->hw_lock, flags); 2194 ql_update_small_bufq_prod_index(qdev); 2195 ql_update_lrg_bufq_prod_index(qdev); 2196 writel(qdev->rsp_consumer_index, 2197 &port_regs->CommonRegs.rspQConsumerIndex); 2198 spin_unlock_irqrestore(&qdev->hw_lock, flags); 2199 2200 ql_enable_interrupts(qdev); 2201 } 2202 return work_done; 2203 } 2204 2205 static irqreturn_t ql3xxx_isr(int irq, void *dev_id) 2206 { 2207 2208 struct net_device *ndev = dev_id; 2209 struct ql3_adapter *qdev = netdev_priv(ndev); 2210 struct ql3xxx_port_registers __iomem *port_regs = 2211 qdev->mem_map_registers; 2212 u32 value; 2213 int handled = 1; 2214 u32 var; 2215 2216 value = ql_read_common_reg_l(qdev, 2217 &port_regs->CommonRegs.ispControlStatus); 2218 2219 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2220 spin_lock(&qdev->adapter_lock); 2221 netif_stop_queue(qdev->ndev); 2222 netif_carrier_off(qdev->ndev); 2223 ql_disable_interrupts(qdev); 2224 qdev->port_link_state = LS_DOWN; 2225 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; 2226 2227 if (value & ISP_CONTROL_FE) { 2228 /* 2229 * Chip Fatal Error. 2230 */ 2231 var = 2232 ql_read_page0_reg_l(qdev, 2233 &port_regs->PortFatalErrStatus); 2234 netdev_warn(ndev, 2235 "Resetting chip. PortFatalErrStatus register = 0x%x\n", 2236 var); 2237 set_bit(QL_RESET_START, &qdev->flags) ; 2238 } else { 2239 /* 2240 * Soft Reset Requested. 2241 */ 2242 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; 2243 netdev_err(ndev, 2244 "Another function issued a reset to the chip. 
ISR value = %x\n", 2245 value); 2246 } 2247 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2248 spin_unlock(&qdev->adapter_lock); 2249 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2250 ql_disable_interrupts(qdev); 2251 if (likely(napi_schedule_prep(&qdev->napi))) 2252 __napi_schedule(&qdev->napi); 2253 } else 2254 return IRQ_NONE; 2255 2256 return IRQ_RETVAL(handled); 2257 } 2258 2259 /* 2260 * Get the total number of segments needed for the given number of fragments. 2261 * This is necessary because outbound address lists (OAL) will be used when 2262 * more than two frags are given. Each address list has 5 addr/len pairs. 2263 * The 5th pair in each OAL is used to point to the next OAL if more frags 2264 * are coming. That is why the frags:segment count ratio is not linear. 2265 */ 2266 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2267 { 2268 if (qdev->device_id == QL3022_DEVICE_ID) 2269 return 1; 2270 2271 if (frags <= 2) 2272 return frags + 1; 2273 else if (frags <= 6) 2274 return frags + 2; 2275 else if (frags <= 10) 2276 return frags + 3; 2277 else if (frags <= 14) 2278 return frags + 4; 2279 else if (frags <= 18) 2280 return frags + 5; 2281 return -1; 2282 } 2283 2284 static void ql_hw_csum_setup(const struct sk_buff *skb, 2285 struct ob_mac_iocb_req *mac_iocb_ptr) 2286 { 2287 const struct iphdr *ip = ip_hdr(skb); 2288 2289 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2290 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2291 2292 if (ip->protocol == IPPROTO_TCP) { 2293 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2294 OB_3032MAC_IOCB_REQ_IC; 2295 } else { 2296 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2297 OB_3032MAC_IOCB_REQ_IC; 2298 } 2299 2300 } 2301 2302 /* 2303 * Map the buffers for this transmit. 2304 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2305 */ 2306 static int ql_send_map(struct ql3_adapter *qdev, 2307 struct ob_mac_iocb_req *mac_iocb_ptr, 2308 struct ql_tx_buf_cb *tx_cb, 2309 struct sk_buff *skb) 2310 { 2311 struct oal *oal; 2312 struct oal_entry *oal_entry; 2313 int len = skb_headlen(skb); 2314 dma_addr_t map; 2315 int err; 2316 int completed_segs, i; 2317 int seg_cnt, seg = 0; 2318 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2319 2320 seg_cnt = tx_cb->seg_count; 2321 /* 2322 * Map the skb buffer first. 2323 */ 2324 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2325 2326 err = pci_dma_mapping_error(qdev->pdev, map); 2327 if (err) { 2328 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2329 err); 2330 2331 return NETDEV_TX_BUSY; 2332 } 2333 2334 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2335 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2336 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2337 oal_entry->len = cpu_to_le32(len); 2338 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2339 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2340 seg++; 2341 2342 if (seg_cnt == 1) { 2343 /* Terminate the last segment. */ 2344 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2345 return NETDEV_TX_OK; 2346 } 2347 oal = tx_cb->oal; 2348 for (completed_segs = 0; 2349 completed_segs < frag_cnt; 2350 completed_segs++, seg++) { 2351 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2352 oal_entry++; 2353 /* 2354 * Check for continuation requirements. 2355 * It's strange but necessary. 2356 * Continuation entry points to outbound address list. 
2357 */ 2358 if ((seg == 2 && seg_cnt > 3) || 2359 (seg == 7 && seg_cnt > 8) || 2360 (seg == 12 && seg_cnt > 13) || 2361 (seg == 17 && seg_cnt > 18)) { 2362 map = pci_map_single(qdev->pdev, oal, 2363 sizeof(struct oal), 2364 PCI_DMA_TODEVICE); 2365 2366 err = pci_dma_mapping_error(qdev->pdev, map); 2367 if (err) { 2368 netdev_err(qdev->ndev, 2369 "PCI mapping outbound address list with error: %d\n", 2370 err); 2371 goto map_error; 2372 } 2373 2374 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2375 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2376 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2377 OAL_CONT_ENTRY); 2378 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2379 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2380 sizeof(struct oal)); 2381 oal_entry = (struct oal_entry *)oal; 2382 oal++; 2383 seg++; 2384 } 2385 2386 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2387 DMA_TO_DEVICE); 2388 2389 err = dma_mapping_error(&qdev->pdev->dev, map); 2390 if (err) { 2391 netdev_err(qdev->ndev, 2392 "PCI mapping frags failed with error: %d\n", 2393 err); 2394 goto map_error; 2395 } 2396 2397 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2398 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2399 oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2400 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2401 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2402 } 2403 /* Terminate the last segment. */ 2404 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2405 return NETDEV_TX_OK; 2406 2407 map_error: 2408 /* A PCI mapping failed, so we need to back out. 2409 * Traverse the OALs and associated pages which have already 2410 * been mapped and unmap them to clean up properly. 2411 */ 2412 2413 seg = 1; 2414 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2415 oal = tx_cb->oal; 2416 for (i = 0; i < completed_segs; i++, seg++) { 2417 oal_entry++; 2418 2419 /* 2420 * Check for continuation requirements. 2421 * It's strange but necessary. 2422 */ 2423 2424 if ((seg == 2 && seg_cnt > 3) || 2425 (seg == 7 && seg_cnt > 8) || 2426 (seg == 12 && seg_cnt > 13) || 2427 (seg == 17 && seg_cnt > 18)) { 2428 pci_unmap_single(qdev->pdev, 2429 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2430 dma_unmap_len(&tx_cb->map[seg], maplen), 2431 PCI_DMA_TODEVICE); 2432 oal++; 2433 seg++; 2434 } 2435 2436 pci_unmap_page(qdev->pdev, 2437 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2438 dma_unmap_len(&tx_cb->map[seg], maplen), 2439 PCI_DMA_TODEVICE); 2440 } 2441 2442 pci_unmap_single(qdev->pdev, 2443 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2444 dma_unmap_len(&tx_cb->map[0], maplen), 2445 PCI_DMA_TODEVICE); 2446 2447 return NETDEV_TX_BUSY; 2448 2449 } 2450 2451 /* 2452 * The difference between 3022 and 3032 sends: 2453 * 3022 only supports a simple single segment transmission. 2454 * 3032 supports checksumming and scatter/gather lists (fragments). 2455 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2456 * in the IOCB plus a chain of outbound address lists (OAL) that 2457 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2458 * will be used to point to an OAL when more ALP entries are required. 2459 * The IOCB is always the top of the chain followed by one or more 2460 * OALs (when necessary). 
2461 */ 2462 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2463 struct net_device *ndev) 2464 { 2465 struct ql3_adapter *qdev = netdev_priv(ndev); 2466 struct ql3xxx_port_registers __iomem *port_regs = 2467 qdev->mem_map_registers; 2468 struct ql_tx_buf_cb *tx_cb; 2469 u32 tot_len = skb->len; 2470 struct ob_mac_iocb_req *mac_iocb_ptr; 2471 2472 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2473 return NETDEV_TX_BUSY; 2474 2475 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2476 tx_cb->seg_count = ql_get_seg_count(qdev, 2477 skb_shinfo(skb)->nr_frags); 2478 if (tx_cb->seg_count == -1) { 2479 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2480 return NETDEV_TX_OK; 2481 } 2482 2483 mac_iocb_ptr = tx_cb->queue_entry; 2484 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2485 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2486 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2487 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2488 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2489 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2490 tx_cb->skb = skb; 2491 if (qdev->device_id == QL3032_DEVICE_ID && 2492 skb->ip_summed == CHECKSUM_PARTIAL) 2493 ql_hw_csum_setup(skb, mac_iocb_ptr); 2494 2495 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2496 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2497 return NETDEV_TX_BUSY; 2498 } 2499 2500 wmb(); 2501 qdev->req_producer_index++; 2502 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2503 qdev->req_producer_index = 0; 2504 wmb(); 2505 ql_write_common_reg_l(qdev, 2506 &port_regs->CommonRegs.reqQProducerIndex, 2507 qdev->req_producer_index); 2508 2509 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2510 "tx queued, slot %d, len %d\n", 2511 qdev->req_producer_index, skb->len); 2512 2513 atomic_dec(&qdev->tx_count); 2514 return NETDEV_TX_OK; 2515 } 2516 2517 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2518 { 2519 qdev->req_q_size = 2520 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2521 2522 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2523 2524 /* The barrier is required to ensure request and response queue 2525 * addr writes to the registers. 
2526 */ 2527 wmb(); 2528 2529 qdev->req_q_virt_addr = 2530 pci_alloc_consistent(qdev->pdev, 2531 (size_t) qdev->req_q_size, 2532 &qdev->req_q_phy_addr); 2533 2534 if ((qdev->req_q_virt_addr == NULL) || 2535 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2536 netdev_err(qdev->ndev, "reqQ failed\n"); 2537 return -ENOMEM; 2538 } 2539 2540 qdev->rsp_q_virt_addr = 2541 pci_alloc_consistent(qdev->pdev, 2542 (size_t) qdev->rsp_q_size, 2543 &qdev->rsp_q_phy_addr); 2544 2545 if ((qdev->rsp_q_virt_addr == NULL) || 2546 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2547 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2548 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2549 qdev->req_q_virt_addr, 2550 qdev->req_q_phy_addr); 2551 return -ENOMEM; 2552 } 2553 2554 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2555 2556 return 0; 2557 } 2558 2559 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2560 { 2561 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2562 netdev_info(qdev->ndev, "Already done\n"); 2563 return; 2564 } 2565 2566 pci_free_consistent(qdev->pdev, 2567 qdev->req_q_size, 2568 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2569 2570 qdev->req_q_virt_addr = NULL; 2571 2572 pci_free_consistent(qdev->pdev, 2573 qdev->rsp_q_size, 2574 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2575 2576 qdev->rsp_q_virt_addr = NULL; 2577 2578 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2579 } 2580 2581 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2582 { 2583 /* Create Large Buffer Queue */ 2584 qdev->lrg_buf_q_size = 2585 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2586 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2587 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2588 else 2589 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2590 2591 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, 2592 sizeof(struct ql_rcv_buf_cb), 2593 GFP_KERNEL); 2594 if (qdev->lrg_buf == NULL) 2595 return -ENOMEM; 2596 2597 qdev->lrg_buf_q_alloc_virt_addr = 2598 pci_alloc_consistent(qdev->pdev, 2599 qdev->lrg_buf_q_alloc_size, 2600 &qdev->lrg_buf_q_alloc_phy_addr); 2601 2602 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2603 netdev_err(qdev->ndev, "lBufQ failed\n"); 2604 return -ENOMEM; 2605 } 2606 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2607 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2608 2609 /* Create Small Buffer Queue */ 2610 qdev->small_buf_q_size = 2611 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2612 if (qdev->small_buf_q_size < PAGE_SIZE) 2613 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2614 else 2615 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2616 2617 qdev->small_buf_q_alloc_virt_addr = 2618 pci_alloc_consistent(qdev->pdev, 2619 qdev->small_buf_q_alloc_size, 2620 &qdev->small_buf_q_alloc_phy_addr); 2621 2622 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2623 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); 2624 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2625 qdev->lrg_buf_q_alloc_virt_addr, 2626 qdev->lrg_buf_q_alloc_phy_addr); 2627 return -ENOMEM; 2628 } 2629 2630 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2631 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2632 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2633 return 0; 2634 } 2635 2636 static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2637 { 2638 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { 2639 netdev_info(qdev->ndev, 
"Already done\n"); 2640 return; 2641 } 2642 kfree(qdev->lrg_buf); 2643 pci_free_consistent(qdev->pdev, 2644 qdev->lrg_buf_q_alloc_size, 2645 qdev->lrg_buf_q_alloc_virt_addr, 2646 qdev->lrg_buf_q_alloc_phy_addr); 2647 2648 qdev->lrg_buf_q_virt_addr = NULL; 2649 2650 pci_free_consistent(qdev->pdev, 2651 qdev->small_buf_q_alloc_size, 2652 qdev->small_buf_q_alloc_virt_addr, 2653 qdev->small_buf_q_alloc_phy_addr); 2654 2655 qdev->small_buf_q_virt_addr = NULL; 2656 2657 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2658 } 2659 2660 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2661 { 2662 int i; 2663 struct bufq_addr_element *small_buf_q_entry; 2664 2665 /* Currently we allocate on one of memory and use it for smallbuffers */ 2666 qdev->small_buf_total_size = 2667 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2668 QL_SMALL_BUFFER_SIZE); 2669 2670 qdev->small_buf_virt_addr = 2671 pci_alloc_consistent(qdev->pdev, 2672 qdev->small_buf_total_size, 2673 &qdev->small_buf_phy_addr); 2674 2675 if (qdev->small_buf_virt_addr == NULL) { 2676 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); 2677 return -ENOMEM; 2678 } 2679 2680 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); 2681 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2682 2683 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2684 2685 /* Initialize the small buffer queue. */ 2686 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2687 small_buf_q_entry->addr_high = 2688 cpu_to_le32(qdev->small_buf_phy_addr_high); 2689 small_buf_q_entry->addr_low = 2690 cpu_to_le32(qdev->small_buf_phy_addr_low + 2691 (i * QL_SMALL_BUFFER_SIZE)); 2692 small_buf_q_entry++; 2693 } 2694 qdev->small_buf_index = 0; 2695 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2696 return 0; 2697 } 2698 2699 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2700 { 2701 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2702 netdev_info(qdev->ndev, "Already done\n"); 2703 return; 2704 } 2705 if (qdev->small_buf_virt_addr != NULL) { 2706 pci_free_consistent(qdev->pdev, 2707 qdev->small_buf_total_size, 2708 qdev->small_buf_virt_addr, 2709 qdev->small_buf_phy_addr); 2710 2711 qdev->small_buf_virt_addr = NULL; 2712 } 2713 } 2714 2715 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2716 { 2717 int i = 0; 2718 struct ql_rcv_buf_cb *lrg_buf_cb; 2719 2720 for (i = 0; i < qdev->num_large_buffers; i++) { 2721 lrg_buf_cb = &qdev->lrg_buf[i]; 2722 if (lrg_buf_cb->skb) { 2723 dev_kfree_skb(lrg_buf_cb->skb); 2724 pci_unmap_single(qdev->pdev, 2725 dma_unmap_addr(lrg_buf_cb, mapaddr), 2726 dma_unmap_len(lrg_buf_cb, maplen), 2727 PCI_DMA_FROMDEVICE); 2728 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2729 } else { 2730 break; 2731 } 2732 } 2733 } 2734 2735 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2736 { 2737 int i; 2738 struct ql_rcv_buf_cb *lrg_buf_cb; 2739 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2740 2741 for (i = 0; i < qdev->num_large_buffers; i++) { 2742 lrg_buf_cb = &qdev->lrg_buf[i]; 2743 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2744 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2745 buf_addr_ele++; 2746 } 2747 qdev->lrg_buf_index = 0; 2748 qdev->lrg_buf_skb_check = 0; 2749 } 2750 2751 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2752 { 2753 int i; 2754 struct ql_rcv_buf_cb *lrg_buf_cb; 2755 struct sk_buff *skb; 2756 dma_addr_t map; 2757 int err; 2758 2759 for (i = 0; i < 
qdev->num_large_buffers; i++) { 2760 skb = netdev_alloc_skb(qdev->ndev, 2761 qdev->lrg_buffer_len); 2762 if (unlikely(!skb)) { 2763 /* Better luck next round */ 2764 netdev_err(qdev->ndev, 2765 "large buff alloc failed for %d bytes at index %d\n", 2766 qdev->lrg_buffer_len * 2, i); 2767 ql_free_large_buffers(qdev); 2768 return -ENOMEM; 2769 } else { 2770 2771 lrg_buf_cb = &qdev->lrg_buf[i]; 2772 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2773 lrg_buf_cb->index = i; 2774 lrg_buf_cb->skb = skb; 2775 /* 2776 * We save some space to copy the ethhdr from first 2777 * buffer 2778 */ 2779 skb_reserve(skb, QL_HEADER_SPACE); 2780 map = pci_map_single(qdev->pdev, 2781 skb->data, 2782 qdev->lrg_buffer_len - 2783 QL_HEADER_SPACE, 2784 PCI_DMA_FROMDEVICE); 2785 2786 err = pci_dma_mapping_error(qdev->pdev, map); 2787 if (err) { 2788 netdev_err(qdev->ndev, 2789 "PCI mapping failed with error: %d\n", 2790 err); 2791 ql_free_large_buffers(qdev); 2792 return -ENOMEM; 2793 } 2794 2795 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2796 dma_unmap_len_set(lrg_buf_cb, maplen, 2797 qdev->lrg_buffer_len - 2798 QL_HEADER_SPACE); 2799 lrg_buf_cb->buf_phy_addr_low = 2800 cpu_to_le32(LS_64BITS(map)); 2801 lrg_buf_cb->buf_phy_addr_high = 2802 cpu_to_le32(MS_64BITS(map)); 2803 } 2804 } 2805 return 0; 2806 } 2807 2808 static void ql_free_send_free_list(struct ql3_adapter *qdev) 2809 { 2810 struct ql_tx_buf_cb *tx_cb; 2811 int i; 2812 2813 tx_cb = &qdev->tx_buf[0]; 2814 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2815 kfree(tx_cb->oal); 2816 tx_cb->oal = NULL; 2817 tx_cb++; 2818 } 2819 } 2820 2821 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2822 { 2823 struct ql_tx_buf_cb *tx_cb; 2824 int i; 2825 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2826 2827 /* Create free list of transmit buffers */ 2828 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2829 2830 tx_cb = &qdev->tx_buf[i]; 2831 tx_cb->skb = NULL; 2832 tx_cb->queue_entry = req_q_curr; 2833 req_q_curr++; 2834 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2835 if (tx_cb->oal == NULL) 2836 return -ENOMEM; 2837 } 2838 return 0; 2839 } 2840 2841 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2842 { 2843 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2844 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2845 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2846 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2847 /* 2848 * Bigger buffers, so less of them. 2849 */ 2850 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2851 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2852 } else { 2853 netdev_err(qdev->ndev, "Invalid mtu size: %d. 
Only %d and %d are accepted.\n", 2854 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2855 return -ENOMEM; 2856 } 2857 qdev->num_large_buffers = 2858 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2859 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2860 qdev->max_frame_size = 2861 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2862 2863 /* 2864 * First allocate a page of shared memory and use it for shadow 2865 * locations of Network Request Queue Consumer Address Register and 2866 * Network Completion Queue Producer Index Register 2867 */ 2868 qdev->shadow_reg_virt_addr = 2869 pci_alloc_consistent(qdev->pdev, 2870 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2871 2872 if (qdev->shadow_reg_virt_addr != NULL) { 2873 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2874 qdev->req_consumer_index_phy_addr_high = 2875 MS_64BITS(qdev->shadow_reg_phy_addr); 2876 qdev->req_consumer_index_phy_addr_low = 2877 LS_64BITS(qdev->shadow_reg_phy_addr); 2878 2879 qdev->prsp_producer_index = 2880 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2881 qdev->rsp_producer_index_phy_addr_high = 2882 qdev->req_consumer_index_phy_addr_high; 2883 qdev->rsp_producer_index_phy_addr_low = 2884 qdev->req_consumer_index_phy_addr_low + 8; 2885 } else { 2886 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2887 return -ENOMEM; 2888 } 2889 2890 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2891 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2892 goto err_req_rsp; 2893 } 2894 2895 if (ql_alloc_buffer_queues(qdev) != 0) { 2896 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2897 goto err_buffer_queues; 2898 } 2899 2900 if (ql_alloc_small_buffers(qdev) != 0) { 2901 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2902 goto err_small_buffers; 2903 } 2904 2905 if (ql_alloc_large_buffers(qdev) != 0) { 2906 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2907 goto err_small_buffers; 2908 } 2909 2910 /* Initialize the large buffer queue. 
*/ 2911 ql_init_large_buffers(qdev); 2912 if (ql_create_send_free_list(qdev)) 2913 goto err_free_list; 2914 2915 qdev->rsp_current = qdev->rsp_q_virt_addr; 2916 2917 return 0; 2918 err_free_list: 2919 ql_free_send_free_list(qdev); 2920 err_small_buffers: 2921 ql_free_buffer_queues(qdev); 2922 err_buffer_queues: 2923 ql_free_net_req_rsp_queues(qdev); 2924 err_req_rsp: 2925 pci_free_consistent(qdev->pdev, 2926 PAGE_SIZE, 2927 qdev->shadow_reg_virt_addr, 2928 qdev->shadow_reg_phy_addr); 2929 2930 return -ENOMEM; 2931 } 2932 2933 static void ql_free_mem_resources(struct ql3_adapter *qdev) 2934 { 2935 ql_free_send_free_list(qdev); 2936 ql_free_large_buffers(qdev); 2937 ql_free_small_buffers(qdev); 2938 ql_free_buffer_queues(qdev); 2939 ql_free_net_req_rsp_queues(qdev); 2940 if (qdev->shadow_reg_virt_addr != NULL) { 2941 pci_free_consistent(qdev->pdev, 2942 PAGE_SIZE, 2943 qdev->shadow_reg_virt_addr, 2944 qdev->shadow_reg_phy_addr); 2945 qdev->shadow_reg_virt_addr = NULL; 2946 } 2947 } 2948 2949 static int ql_init_misc_registers(struct ql3_adapter *qdev) 2950 { 2951 struct ql3xxx_local_ram_registers __iomem *local_ram = 2952 (void __iomem *)qdev->mem_map_registers; 2953 2954 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2955 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2956 2) << 4)) 2957 return -1; 2958 2959 ql_write_page2_reg(qdev, 2960 &local_ram->bufletSize, qdev->nvram_data.bufletSize); 2961 2962 ql_write_page2_reg(qdev, 2963 &local_ram->maxBufletCount, 2964 qdev->nvram_data.bufletCount); 2965 2966 ql_write_page2_reg(qdev, 2967 &local_ram->freeBufletThresholdLow, 2968 (qdev->nvram_data.tcpWindowThreshold25 << 16) | 2969 (qdev->nvram_data.tcpWindowThreshold0)); 2970 2971 ql_write_page2_reg(qdev, 2972 &local_ram->freeBufletThresholdHigh, 2973 qdev->nvram_data.tcpWindowThreshold50); 2974 2975 ql_write_page2_reg(qdev, 2976 &local_ram->ipHashTableBase, 2977 (qdev->nvram_data.ipHashTableBaseHi << 16) | 2978 qdev->nvram_data.ipHashTableBaseLo); 2979 ql_write_page2_reg(qdev, 2980 &local_ram->ipHashTableCount, 2981 qdev->nvram_data.ipHashTableSize); 2982 ql_write_page2_reg(qdev, 2983 &local_ram->tcpHashTableBase, 2984 (qdev->nvram_data.tcpHashTableBaseHi << 16) | 2985 qdev->nvram_data.tcpHashTableBaseLo); 2986 ql_write_page2_reg(qdev, 2987 &local_ram->tcpHashTableCount, 2988 qdev->nvram_data.tcpHashTableSize); 2989 ql_write_page2_reg(qdev, 2990 &local_ram->ncbBase, 2991 (qdev->nvram_data.ncbTableBaseHi << 16) | 2992 qdev->nvram_data.ncbTableBaseLo); 2993 ql_write_page2_reg(qdev, 2994 &local_ram->maxNcbCount, 2995 qdev->nvram_data.ncbTableSize); 2996 ql_write_page2_reg(qdev, 2997 &local_ram->drbBase, 2998 (qdev->nvram_data.drbTableBaseHi << 16) | 2999 qdev->nvram_data.drbTableBaseLo); 3000 ql_write_page2_reg(qdev, 3001 &local_ram->maxDrbCount, 3002 qdev->nvram_data.drbTableSize); 3003 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); 3004 return 0; 3005 } 3006 3007 static int ql_adapter_initialize(struct ql3_adapter *qdev) 3008 { 3009 u32 value; 3010 struct ql3xxx_port_registers __iomem *port_regs = 3011 qdev->mem_map_registers; 3012 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3013 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3014 (void __iomem *)port_regs; 3015 u32 delay = 10; 3016 int status = 0; 3017 3018 if (ql_mii_setup(qdev)) 3019 return -1; 3020 3021 /* Bring out PHY out of reset */ 3022 ql_write_common_reg(qdev, spir, 3023 (ISP_SERIAL_PORT_IF_WE | 3024 (ISP_SERIAL_PORT_IF_WE << 16))); 3025 /* Give the PHY time to come out of reset. 
*/ 3026 mdelay(100); 3027 qdev->port_link_state = LS_DOWN; 3028 netif_carrier_off(qdev->ndev); 3029 3030 /* V2 chip fix for ARS-39168. */ 3031 ql_write_common_reg(qdev, spir, 3032 (ISP_SERIAL_PORT_IF_SDE | 3033 (ISP_SERIAL_PORT_IF_SDE << 16))); 3034 3035 /* Request Queue Registers */ 3036 *((u32 *)(qdev->preq_consumer_index)) = 0; 3037 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3038 qdev->req_producer_index = 0; 3039 3040 ql_write_page1_reg(qdev, 3041 &hmem_regs->reqConsumerIndexAddrHigh, 3042 qdev->req_consumer_index_phy_addr_high); 3043 ql_write_page1_reg(qdev, 3044 &hmem_regs->reqConsumerIndexAddrLow, 3045 qdev->req_consumer_index_phy_addr_low); 3046 3047 ql_write_page1_reg(qdev, 3048 &hmem_regs->reqBaseAddrHigh, 3049 MS_64BITS(qdev->req_q_phy_addr)); 3050 ql_write_page1_reg(qdev, 3051 &hmem_regs->reqBaseAddrLow, 3052 LS_64BITS(qdev->req_q_phy_addr)); 3053 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3054 3055 /* Response Queue Registers */ 3056 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3057 qdev->rsp_consumer_index = 0; 3058 qdev->rsp_current = qdev->rsp_q_virt_addr; 3059 3060 ql_write_page1_reg(qdev, 3061 &hmem_regs->rspProducerIndexAddrHigh, 3062 qdev->rsp_producer_index_phy_addr_high); 3063 3064 ql_write_page1_reg(qdev, 3065 &hmem_regs->rspProducerIndexAddrLow, 3066 qdev->rsp_producer_index_phy_addr_low); 3067 3068 ql_write_page1_reg(qdev, 3069 &hmem_regs->rspBaseAddrHigh, 3070 MS_64BITS(qdev->rsp_q_phy_addr)); 3071 3072 ql_write_page1_reg(qdev, 3073 &hmem_regs->rspBaseAddrLow, 3074 LS_64BITS(qdev->rsp_q_phy_addr)); 3075 3076 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3077 3078 /* Large Buffer Queue */ 3079 ql_write_page1_reg(qdev, 3080 &hmem_regs->rxLargeQBaseAddrHigh, 3081 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3082 3083 ql_write_page1_reg(qdev, 3084 &hmem_regs->rxLargeQBaseAddrLow, 3085 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3086 3087 ql_write_page1_reg(qdev, 3088 &hmem_regs->rxLargeQLength, 3089 qdev->num_lbufq_entries); 3090 3091 ql_write_page1_reg(qdev, 3092 &hmem_regs->rxLargeBufferLength, 3093 qdev->lrg_buffer_len); 3094 3095 /* Small Buffer Queue */ 3096 ql_write_page1_reg(qdev, 3097 &hmem_regs->rxSmallQBaseAddrHigh, 3098 MS_64BITS(qdev->small_buf_q_phy_addr)); 3099 3100 ql_write_page1_reg(qdev, 3101 &hmem_regs->rxSmallQBaseAddrLow, 3102 LS_64BITS(qdev->small_buf_q_phy_addr)); 3103 3104 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3105 ql_write_page1_reg(qdev, 3106 &hmem_regs->rxSmallBufferLength, 3107 QL_SMALL_BUFFER_SIZE); 3108 3109 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3110 qdev->small_buf_release_cnt = 8; 3111 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3112 qdev->lrg_buf_release_cnt = 8; 3113 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3114 qdev->small_buf_index = 0; 3115 qdev->lrg_buf_index = 0; 3116 qdev->lrg_buf_free_count = 0; 3117 qdev->lrg_buf_free_head = NULL; 3118 qdev->lrg_buf_free_tail = NULL; 3119 3120 ql_write_common_reg(qdev, 3121 &port_regs->CommonRegs. 3122 rxSmallQProducerIndex, 3123 qdev->small_buf_q_producer_index); 3124 ql_write_common_reg(qdev, 3125 &port_regs->CommonRegs. 3126 rxLargeQProducerIndex, 3127 qdev->lrg_buf_q_producer_index); 3128 3129 /* 3130 * Find out if the chip has already been initialized. If it has, then 3131 * we skip some of the initialization. 
3132 */ 3133 clear_bit(QL_LINK_MASTER, &qdev->flags); 3134 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3135 if ((value & PORT_STATUS_IC) == 0) { 3136 3137 /* Chip has not been configured yet, so let it rip. */ 3138 if (ql_init_misc_registers(qdev)) { 3139 status = -1; 3140 goto out; 3141 } 3142 3143 value = qdev->nvram_data.tcpMaxWindowSize; 3144 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3145 3146 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3147 3148 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3149 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3150 * 2) << 13)) { 3151 status = -1; 3152 goto out; 3153 } 3154 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3155 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3156 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3157 16) | (INTERNAL_CHIP_SD | 3158 INTERNAL_CHIP_WE))); 3159 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3160 } 3161 3162 if (qdev->mac_index) 3163 ql_write_page0_reg(qdev, 3164 &port_regs->mac1MaxFrameLengthReg, 3165 qdev->max_frame_size); 3166 else 3167 ql_write_page0_reg(qdev, 3168 &port_regs->mac0MaxFrameLengthReg, 3169 qdev->max_frame_size); 3170 3171 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3172 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3173 2) << 7)) { 3174 status = -1; 3175 goto out; 3176 } 3177 3178 PHY_Setup(qdev); 3179 ql_init_scan_mode(qdev); 3180 ql_get_phy_owner(qdev); 3181 3182 /* Load the MAC Configuration */ 3183 3184 /* Program lower 32 bits of the MAC address */ 3185 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3186 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3187 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3188 ((qdev->ndev->dev_addr[2] << 24) 3189 | (qdev->ndev->dev_addr[3] << 16) 3190 | (qdev->ndev->dev_addr[4] << 8) 3191 | qdev->ndev->dev_addr[5])); 3192 3193 /* Program top 16 bits of the MAC address */ 3194 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3195 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3196 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3197 ((qdev->ndev->dev_addr[0] << 8) 3198 | qdev->ndev->dev_addr[1])); 3199 3200 /* Enable Primary MAC */ 3201 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3202 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3203 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3204 3205 /* Clear Primary and Secondary IP addresses */ 3206 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3207 ((IP_ADDR_INDEX_REG_MASK << 16) | 3208 (qdev->mac_index << 2))); 3209 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3210 3211 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3212 ((IP_ADDR_INDEX_REG_MASK << 16) | 3213 ((qdev->mac_index << 2) + 1))); 3214 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3215 3216 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3217 3218 /* Indicate Configuration Complete */ 3219 ql_write_page0_reg(qdev, 3220 &port_regs->portControl, 3221 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3222 3223 do { 3224 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3225 if (value & PORT_STATUS_IC) 3226 break; 3227 spin_unlock_irq(&qdev->hw_lock); 3228 msleep(500); 3229 spin_lock_irq(&qdev->hw_lock); 3230 } while (--delay); 3231 3232 if (delay == 0) { 3233 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3234 status = -1; 3235 goto out; 3236 } 3237 3238 /* Enable Ethernet Function */ 3239 if (qdev->device_id == QL3032_DEVICE_ID) { 3240 value = 3241 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 3242 QL3032_PORT_CONTROL_EIv6 | 
QL3032_PORT_CONTROL_EIv4 | 3243 QL3032_PORT_CONTROL_ET); 3244 ql_write_page0_reg(qdev, &port_regs->functionControl, 3245 ((value << 16) | value)); 3246 } else { 3247 value = 3248 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 3249 PORT_CONTROL_HH); 3250 ql_write_page0_reg(qdev, &port_regs->portControl, 3251 ((value << 16) | value)); 3252 } 3253 3254 3255 out: 3256 return status; 3257 } 3258 3259 /* 3260 * Caller holds hw_lock. 3261 */ 3262 static int ql_adapter_reset(struct ql3_adapter *qdev) 3263 { 3264 struct ql3xxx_port_registers __iomem *port_regs = 3265 qdev->mem_map_registers; 3266 int status = 0; 3267 u16 value; 3268 int max_wait_time; 3269 3270 set_bit(QL_RESET_ACTIVE, &qdev->flags); 3271 clear_bit(QL_RESET_DONE, &qdev->flags); 3272 3273 /* 3274 * Issue soft reset to chip. 3275 */ 3276 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); 3277 ql_write_common_reg(qdev, 3278 &port_regs->CommonRegs.ispControlStatus, 3279 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3280 3281 /* Wait 3 seconds for reset to complete. */ 3282 netdev_printk(KERN_DEBUG, qdev->ndev, 3283 "Wait 10 milliseconds for reset to complete\n"); 3284 3285 /* Wait until the firmware tells us the Soft Reset is done */ 3286 max_wait_time = 5; 3287 do { 3288 value = 3289 ql_read_common_reg(qdev, 3290 &port_regs->CommonRegs.ispControlStatus); 3291 if ((value & ISP_CONTROL_SR) == 0) 3292 break; 3293 3294 ssleep(1); 3295 } while ((--max_wait_time)); 3296 3297 /* 3298 * Also, make sure that the Network Reset Interrupt bit has been 3299 * cleared after the soft reset has taken place. 3300 */ 3301 value = 3302 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3303 if (value & ISP_CONTROL_RI) { 3304 netdev_printk(KERN_DEBUG, qdev->ndev, 3305 "clearing RI after reset\n"); 3306 ql_write_common_reg(qdev, 3307 &port_regs->CommonRegs. 3308 ispControlStatus, 3309 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3310 } 3311 3312 if (max_wait_time == 0) { 3313 /* Issue Force Soft Reset */ 3314 ql_write_common_reg(qdev, 3315 &port_regs->CommonRegs. 3316 ispControlStatus, 3317 ((ISP_CONTROL_FSR << 16) | 3318 ISP_CONTROL_FSR)); 3319 /* 3320 * Wait until the firmware tells us the Force Soft Reset is 3321 * done 3322 */ 3323 max_wait_time = 5; 3324 do { 3325 value = ql_read_common_reg(qdev, 3326 &port_regs->CommonRegs. 
3327 ispControlStatus); 3328 if ((value & ISP_CONTROL_FSR) == 0) 3329 break; 3330 ssleep(1); 3331 } while ((--max_wait_time)); 3332 } 3333 if (max_wait_time == 0) 3334 status = 1; 3335 3336 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3337 set_bit(QL_RESET_DONE, &qdev->flags); 3338 return status; 3339 } 3340 3341 static void ql_set_mac_info(struct ql3_adapter *qdev) 3342 { 3343 struct ql3xxx_port_registers __iomem *port_regs = 3344 qdev->mem_map_registers; 3345 u32 value, port_status; 3346 u8 func_number; 3347 3348 /* Get the function number */ 3349 value = 3350 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3351 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3352 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3353 switch (value & ISP_CONTROL_FN_MASK) { 3354 case ISP_CONTROL_FN0_NET: 3355 qdev->mac_index = 0; 3356 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3357 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3358 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3359 if (port_status & PORT_STATUS_SM0) 3360 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3361 else 3362 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3363 break; 3364 3365 case ISP_CONTROL_FN1_NET: 3366 qdev->mac_index = 1; 3367 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3368 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3369 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3370 if (port_status & PORT_STATUS_SM1) 3371 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3372 else 3373 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3374 break; 3375 3376 case ISP_CONTROL_FN0_SCSI: 3377 case ISP_CONTROL_FN1_SCSI: 3378 default: 3379 netdev_printk(KERN_DEBUG, qdev->ndev, 3380 "Invalid function number, ispControlStatus = 0x%x\n", 3381 value); 3382 break; 3383 } 3384 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3385 } 3386 3387 static void ql_display_dev_info(struct net_device *ndev) 3388 { 3389 struct ql3_adapter *qdev = netdev_priv(ndev); 3390 struct pci_dev *pdev = qdev->pdev; 3391 3392 netdev_info(ndev, 3393 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3394 DRV_NAME, qdev->index, qdev->chip_rev_id, 3395 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3396 qdev->pci_slot); 3397 netdev_info(ndev, "%s Interface\n", 3398 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3399 3400 /* 3401 * Print PCI bus width/type. 3402 */ 3403 netdev_info(ndev, "Bus interface is %s %s\n", 3404 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3405 ((qdev->pci_x) ? 
"PCI-X" : "PCI")); 3406 3407 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3408 qdev->mem_map_registers); 3409 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3410 3411 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3412 } 3413 3414 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3415 { 3416 struct net_device *ndev = qdev->ndev; 3417 int retval = 0; 3418 3419 netif_stop_queue(ndev); 3420 netif_carrier_off(ndev); 3421 3422 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3423 clear_bit(QL_LINK_MASTER, &qdev->flags); 3424 3425 ql_disable_interrupts(qdev); 3426 3427 free_irq(qdev->pdev->irq, ndev); 3428 3429 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3430 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3431 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3432 pci_disable_msi(qdev->pdev); 3433 } 3434 3435 del_timer_sync(&qdev->adapter_timer); 3436 3437 napi_disable(&qdev->napi); 3438 3439 if (do_reset) { 3440 int soft_reset; 3441 unsigned long hw_flags; 3442 3443 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3444 if (ql_wait_for_drvr_lock(qdev)) { 3445 soft_reset = ql_adapter_reset(qdev); 3446 if (soft_reset) { 3447 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3448 qdev->index); 3449 } 3450 netdev_err(ndev, 3451 "Releasing driver lock via chip reset\n"); 3452 } else { 3453 netdev_err(ndev, 3454 "Could not acquire driver lock to do reset!\n"); 3455 retval = -1; 3456 } 3457 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3458 } 3459 ql_free_mem_resources(qdev); 3460 return retval; 3461 } 3462 3463 static int ql_adapter_up(struct ql3_adapter *qdev) 3464 { 3465 struct net_device *ndev = qdev->ndev; 3466 int err; 3467 unsigned long irq_flags = IRQF_SHARED; 3468 unsigned long hw_flags; 3469 3470 if (ql_alloc_mem_resources(qdev)) { 3471 netdev_err(ndev, "Unable to allocate buffers\n"); 3472 return -ENOMEM; 3473 } 3474 3475 if (qdev->msi) { 3476 if (pci_enable_msi(qdev->pdev)) { 3477 netdev_err(ndev, 3478 "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); 3479 qdev->msi = 0; 3480 } else { 3481 netdev_info(ndev, "MSI Enabled...\n"); 3482 set_bit(QL_MSI_ENABLED, &qdev->flags); 3483 irq_flags &= ~IRQF_SHARED; 3484 } 3485 } 3486 3487 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3488 irq_flags, ndev->name, ndev); 3489 if (err) { 3490 netdev_err(ndev, 3491 "Failed to reserve interrupt %d - already in use\n", 3492 qdev->pdev->irq); 3493 goto err_irq; 3494 } 3495 3496 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3497 3498 err = ql_wait_for_drvr_lock(qdev); 3499 if (err) { 3500 err = ql_adapter_initialize(qdev); 3501 if (err) { 3502 netdev_err(ndev, "Unable to initialize adapter\n"); 3503 goto err_init; 3504 } 3505 netdev_err(ndev, "Releasing driver lock\n"); 3506 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3507 } else { 3508 netdev_err(ndev, "Could not acquire driver lock\n"); 3509 goto err_lock; 3510 } 3511 3512 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3513 3514 set_bit(QL_ADAPTER_UP, &qdev->flags); 3515 3516 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3517 3518 napi_enable(&qdev->napi); 3519 ql_enable_interrupts(qdev); 3520 return 0; 3521 3522 err_init: 3523 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3524 err_lock: 3525 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3526 free_irq(qdev->pdev->irq, ndev); 3527 err_irq: 3528 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3529 netdev_info(ndev, "calling pci_disable_msi()\n"); 3530 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3531 pci_disable_msi(qdev->pdev); 3532 } 3533 return err; 3534 } 3535 3536 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3537 { 3538 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3539 netdev_err(qdev->ndev, 3540 "Driver up/down cycle failed, closing device\n"); 3541 rtnl_lock(); 3542 dev_close(qdev->ndev); 3543 rtnl_unlock(); 3544 return -1; 3545 } 3546 return 0; 3547 } 3548 3549 static int ql3xxx_close(struct net_device *ndev) 3550 { 3551 struct ql3_adapter *qdev = netdev_priv(ndev); 3552 3553 /* 3554 * Wait for device to recover from a reset. 3555 * (Rarely happens, but possible.) 
3556 */ 3557 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3558 msleep(50); 3559 3560 ql_adapter_down(qdev, QL_DO_RESET); 3561 return 0; 3562 } 3563 3564 static int ql3xxx_open(struct net_device *ndev) 3565 { 3566 struct ql3_adapter *qdev = netdev_priv(ndev); 3567 return ql_adapter_up(qdev); 3568 } 3569 3570 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3571 { 3572 struct ql3_adapter *qdev = netdev_priv(ndev); 3573 struct ql3xxx_port_registers __iomem *port_regs = 3574 qdev->mem_map_registers; 3575 struct sockaddr *addr = p; 3576 unsigned long hw_flags; 3577 3578 if (netif_running(ndev)) 3579 return -EBUSY; 3580 3581 if (!is_valid_ether_addr(addr->sa_data)) 3582 return -EADDRNOTAVAIL; 3583 3584 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3585 3586 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3587 /* Program lower 32 bits of the MAC address */ 3588 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3589 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3590 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3591 ((ndev->dev_addr[2] << 24) | (ndev-> 3592 dev_addr[3] << 16) | 3593 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3594 3595 /* Program top 16 bits of the MAC address */ 3596 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3597 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3598 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3599 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3600 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3601 3602 return 0; 3603 } 3604 3605 static void ql3xxx_tx_timeout(struct net_device *ndev) 3606 { 3607 struct ql3_adapter *qdev = netdev_priv(ndev); 3608 3609 netdev_err(ndev, "Resetting...\n"); 3610 /* 3611 * Stop the queues, we've got a problem. 3612 */ 3613 netif_stop_queue(ndev); 3614 3615 /* 3616 * Wake up the worker to process this event. 3617 */ 3618 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3619 } 3620 3621 static void ql_reset_work(struct work_struct *work) 3622 { 3623 struct ql3_adapter *qdev = 3624 container_of(work, struct ql3_adapter, reset_work.work); 3625 struct net_device *ndev = qdev->ndev; 3626 u32 value; 3627 struct ql_tx_buf_cb *tx_cb; 3628 int max_wait_time, i; 3629 struct ql3xxx_port_registers __iomem *port_regs = 3630 qdev->mem_map_registers; 3631 unsigned long hw_flags; 3632 3633 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3634 clear_bit(QL_LINK_MASTER, &qdev->flags); 3635 3636 /* 3637 * Loop through the active list and return the skb. 3638 */ 3639 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3640 int j; 3641 tx_cb = &qdev->tx_buf[i]; 3642 if (tx_cb->skb) { 3643 netdev_printk(KERN_DEBUG, ndev, 3644 "Freeing lost SKB\n"); 3645 pci_unmap_single(qdev->pdev, 3646 dma_unmap_addr(&tx_cb->map[0], 3647 mapaddr), 3648 dma_unmap_len(&tx_cb->map[0], maplen), 3649 PCI_DMA_TODEVICE); 3650 for (j = 1; j < tx_cb->seg_count; j++) { 3651 pci_unmap_page(qdev->pdev, 3652 dma_unmap_addr(&tx_cb->map[j], 3653 mapaddr), 3654 dma_unmap_len(&tx_cb->map[j], 3655 maplen), 3656 PCI_DMA_TODEVICE); 3657 } 3658 dev_kfree_skb(tx_cb->skb); 3659 tx_cb->skb = NULL; 3660 } 3661 } 3662 3663 netdev_err(ndev, "Clearing NRI after reset\n"); 3664 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3665 ql_write_common_reg(qdev, 3666 &port_regs->CommonRegs. 3667 ispControlStatus, 3668 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3669 /* 3670 * Wait the for Soft Reset to Complete. 
3671 */ 3672 max_wait_time = 10; 3673 do { 3674 value = ql_read_common_reg(qdev, 3675 &port_regs->CommonRegs. 3676 3677 ispControlStatus); 3678 if ((value & ISP_CONTROL_SR) == 0) { 3679 netdev_printk(KERN_DEBUG, ndev, 3680 "reset completed\n"); 3681 break; 3682 } 3683 3684 if (value & ISP_CONTROL_RI) { 3685 netdev_printk(KERN_DEBUG, ndev, 3686 "clearing NRI after reset\n"); 3687 ql_write_common_reg(qdev, 3688 &port_regs-> 3689 CommonRegs. 3690 ispControlStatus, 3691 ((ISP_CONTROL_RI << 3692 16) | ISP_CONTROL_RI)); 3693 } 3694 3695 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3696 ssleep(1); 3697 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3698 } while (--max_wait_time); 3699 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3700 3701 if (value & ISP_CONTROL_SR) { 3702 3703 /* 3704 * Set the reset flags and clear the board again. 3705 * Nothing else to do... 3706 */ 3707 netdev_err(ndev, 3708 "Timed out waiting for reset to complete\n"); 3709 netdev_err(ndev, "Do a reset\n"); 3710 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3711 clear_bit(QL_RESET_START, &qdev->flags); 3712 ql_cycle_adapter(qdev, QL_DO_RESET); 3713 return; 3714 } 3715 3716 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3717 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3718 clear_bit(QL_RESET_START, &qdev->flags); 3719 ql_cycle_adapter(qdev, QL_NO_RESET); 3720 } 3721 } 3722 3723 static void ql_tx_timeout_work(struct work_struct *work) 3724 { 3725 struct ql3_adapter *qdev = 3726 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3727 3728 ql_cycle_adapter(qdev, QL_DO_RESET); 3729 } 3730 3731 static void ql_get_board_info(struct ql3_adapter *qdev) 3732 { 3733 struct ql3xxx_port_registers __iomem *port_regs = 3734 qdev->mem_map_registers; 3735 u32 value; 3736 3737 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3738 3739 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3740 if (value & PORT_STATUS_64) 3741 qdev->pci_width = 64; 3742 else 3743 qdev->pci_width = 32; 3744 if (value & PORT_STATUS_X) 3745 qdev->pci_x = 1; 3746 else 3747 qdev->pci_x = 0; 3748 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3749 } 3750 3751 static void ql3xxx_timer(struct timer_list *t) 3752 { 3753 struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer); 3754 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3755 } 3756 3757 static const struct net_device_ops ql3xxx_netdev_ops = { 3758 .ndo_open = ql3xxx_open, 3759 .ndo_start_xmit = ql3xxx_send, 3760 .ndo_stop = ql3xxx_close, 3761 .ndo_validate_addr = eth_validate_addr, 3762 .ndo_set_mac_address = ql3xxx_set_mac_address, 3763 .ndo_tx_timeout = ql3xxx_tx_timeout, 3764 }; 3765 3766 static int ql3xxx_probe(struct pci_dev *pdev, 3767 const struct pci_device_id *pci_entry) 3768 { 3769 struct net_device *ndev = NULL; 3770 struct ql3_adapter *qdev = NULL; 3771 static int cards_found; 3772 int uninitialized_var(pci_using_dac), err; 3773 3774 err = pci_enable_device(pdev); 3775 if (err) { 3776 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3777 goto err_out; 3778 } 3779 3780 err = pci_request_regions(pdev, DRV_NAME); 3781 if (err) { 3782 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3783 goto err_out_disable_pdev; 3784 } 3785 3786 pci_set_master(pdev); 3787 3788 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3789 pci_using_dac = 1; 3790 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3791 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3792 pci_using_dac = 0; 3793 err = 
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3794 } 3795 3796 if (err) { 3797 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3798 goto err_out_free_regions; 3799 } 3800 3801 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3802 if (!ndev) { 3803 err = -ENOMEM; 3804 goto err_out_free_regions; 3805 } 3806 3807 SET_NETDEV_DEV(ndev, &pdev->dev); 3808 3809 pci_set_drvdata(pdev, ndev); 3810 3811 qdev = netdev_priv(ndev); 3812 qdev->index = cards_found; 3813 qdev->ndev = ndev; 3814 qdev->pdev = pdev; 3815 qdev->device_id = pci_entry->device; 3816 qdev->port_link_state = LS_DOWN; 3817 if (msi) 3818 qdev->msi = 1; 3819 3820 qdev->msg_enable = netif_msg_init(debug, default_msg); 3821 3822 if (pci_using_dac) 3823 ndev->features |= NETIF_F_HIGHDMA; 3824 if (qdev->device_id == QL3032_DEVICE_ID) 3825 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3826 3827 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3828 if (!qdev->mem_map_registers) { 3829 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3830 err = -EIO; 3831 goto err_out_free_ndev; 3832 } 3833 3834 spin_lock_init(&qdev->adapter_lock); 3835 spin_lock_init(&qdev->hw_lock); 3836 3837 /* Set driver entry points */ 3838 ndev->netdev_ops = &ql3xxx_netdev_ops; 3839 ndev->ethtool_ops = &ql3xxx_ethtool_ops; 3840 ndev->watchdog_timeo = 5 * HZ; 3841 3842 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3843 3844 ndev->irq = pdev->irq; 3845 3846 /* make sure the EEPROM is good */ 3847 if (ql_get_nvram_params(qdev)) { 3848 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3849 __func__, qdev->index); 3850 err = -EIO; 3851 goto err_out_iounmap; 3852 } 3853 3854 ql_set_mac_info(qdev); 3855 3856 /* Validate and set parameters */ 3857 if (qdev->mac_index) { 3858 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3859 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3860 } else { 3861 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3862 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3863 } 3864 3865 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3866 3867 /* Record PCI bus information. */ 3868 ql_get_board_info(qdev); 3869 3870 /* 3871 * Set the Maximum Memory Read Byte Count value. We do this to handle 3872 * jumbo frames. 
3873 */ 3874 if (qdev->pci_x) 3875 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3876 3877 err = register_netdev(ndev); 3878 if (err) { 3879 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3880 goto err_out_iounmap; 3881 } 3882 3883 /* we're going to reset, so assume we have no link for now */ 3884 3885 netif_carrier_off(ndev); 3886 netif_stop_queue(ndev); 3887 3888 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3889 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3890 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3891 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3892 3893 timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0); 3894 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3895 3896 if (!cards_found) { 3897 pr_alert("%s\n", DRV_STRING); 3898 pr_alert("Driver name: %s, Version: %s\n", 3899 DRV_NAME, DRV_VERSION); 3900 } 3901 ql_display_dev_info(ndev); 3902 3903 cards_found++; 3904 return 0; 3905 3906 err_out_iounmap: 3907 iounmap(qdev->mem_map_registers); 3908 err_out_free_ndev: 3909 free_netdev(ndev); 3910 err_out_free_regions: 3911 pci_release_regions(pdev); 3912 err_out_disable_pdev: 3913 pci_disable_device(pdev); 3914 err_out: 3915 return err; 3916 } 3917 3918 static void ql3xxx_remove(struct pci_dev *pdev) 3919 { 3920 struct net_device *ndev = pci_get_drvdata(pdev); 3921 struct ql3_adapter *qdev = netdev_priv(ndev); 3922 3923 unregister_netdev(ndev); 3924 3925 ql_disable_interrupts(qdev); 3926 3927 if (qdev->workqueue) { 3928 cancel_delayed_work(&qdev->reset_work); 3929 cancel_delayed_work(&qdev->tx_timeout_work); 3930 destroy_workqueue(qdev->workqueue); 3931 qdev->workqueue = NULL; 3932 } 3933 3934 iounmap(qdev->mem_map_registers); 3935 pci_release_regions(pdev); 3936 free_netdev(ndev); 3937 } 3938 3939 static struct pci_driver ql3xxx_driver = { 3940 3941 .name = DRV_NAME, 3942 .id_table = ql3xxx_pci_tbl, 3943 .probe = ql3xxx_probe, 3944 .remove = ql3xxx_remove, 3945 }; 3946 3947 module_pci_driver(ql3xxx_driver); 3948