1 /* 2 * QLogic QLA3xxx NIC HBA Driver 3 * Copyright (c) 2003-2006 QLogic Corporation 4 * 5 * See LICENSE.qla3xxx for copyright and licensing details. 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/kernel.h> 11 #include <linux/types.h> 12 #include <linux/module.h> 13 #include <linux/list.h> 14 #include <linux/pci.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/sched.h> 17 #include <linux/slab.h> 18 #include <linux/dmapool.h> 19 #include <linux/mempool.h> 20 #include <linux/spinlock.h> 21 #include <linux/kthread.h> 22 #include <linux/interrupt.h> 23 #include <linux/errno.h> 24 #include <linux/ioport.h> 25 #include <linux/ip.h> 26 #include <linux/in.h> 27 #include <linux/if_arp.h> 28 #include <linux/if_ether.h> 29 #include <linux/netdevice.h> 30 #include <linux/etherdevice.h> 31 #include <linux/ethtool.h> 32 #include <linux/skbuff.h> 33 #include <linux/rtnetlink.h> 34 #include <linux/if_vlan.h> 35 #include <linux/delay.h> 36 #include <linux/mm.h> 37 #include <linux/prefetch.h> 38 39 #include "qla3xxx.h" 40 41 #define DRV_NAME "qla3xxx" 42 #define DRV_STRING "QLogic ISP3XXX Network Driver" 43 #define DRV_VERSION "v2.03.00-k5" 44 45 static const char ql3xxx_driver_name[] = DRV_NAME; 46 static const char ql3xxx_driver_version[] = DRV_VERSION; 47 48 #define TIMED_OUT_MSG \ 49 "Timed out waiting for management port to get free before issuing command\n" 50 51 MODULE_AUTHOR("QLogic Corporation"); 52 MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); 53 MODULE_LICENSE("GPL"); 54 MODULE_VERSION(DRV_VERSION); 55 56 static const u32 default_msg 57 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 58 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; 59 60 static int debug = -1; /* defaults above */ 61 module_param(debug, int, 0); 62 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 63 64 static int msi; 65 module_param(msi, int, 0); 66 MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); 67 68 static const struct pci_device_id ql3xxx_pci_tbl[] = { 69 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 70 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, 71 /* required last entry */ 72 {0,} 73 }; 74 75 MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); 76 77 /* 78 * These are the known PHY's which are used 79 */ 80 enum PHY_DEVICE_TYPE { 81 PHY_TYPE_UNKNOWN = 0, 82 PHY_VITESSE_VSC8211, 83 PHY_AGERE_ET1011C, 84 MAX_PHY_DEV_TYPES 85 }; 86 87 struct PHY_DEVICE_INFO { 88 const enum PHY_DEVICE_TYPE phyDevice; 89 const u32 phyIdOUI; 90 const u16 phyIdModel; 91 const char *name; 92 }; 93 94 static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { 95 {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, 96 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, 97 {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, 98 }; 99 100 101 /* 102 * Caller must take hw_lock. 103 */ 104 static int ql_sem_spinlock(struct ql3_adapter *qdev, 105 u32 sem_mask, u32 sem_bits) 106 { 107 struct ql3xxx_port_registers __iomem *port_regs = 108 qdev->mem_map_registers; 109 u32 value; 110 unsigned int seconds = 3; 111 112 do { 113 writel((sem_mask | sem_bits), 114 &port_regs->CommonRegs.semaphoreReg); 115 value = readl(&port_regs->CommonRegs.semaphoreReg); 116 if ((value & (sem_mask >> 16)) == sem_bits) 117 return 0; 118 ssleep(1); 119 } while (--seconds); 120 return -1; 121 } 122 123 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) 124 { 125 struct ql3xxx_port_registers __iomem *port_regs = 126 qdev->mem_map_registers; 127 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); 128 readl(&port_regs->CommonRegs.semaphoreReg); 129 } 130 131 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) 132 { 133 struct ql3xxx_port_registers __iomem *port_regs = 134 qdev->mem_map_registers; 135 u32 value; 136 137 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); 138 value = readl(&port_regs->CommonRegs.semaphoreReg); 139 return ((value & (sem_mask >> 16)) == sem_bits); 140 } 141 142 /* 143 * Caller holds hw_lock. 144 */ 145 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) 146 { 147 int i = 0; 148 149 do { 150 if (ql_sem_lock(qdev, 151 QL_DRVR_SEM_MASK, 152 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 153 * 2) << 1)) { 154 netdev_printk(KERN_DEBUG, qdev->ndev, 155 "driver lock acquired\n"); 156 return 1; 157 } 158 ssleep(1); 159 } while (++i < 10); 160 161 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); 162 return 0; 163 } 164 165 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) 166 { 167 struct ql3xxx_port_registers __iomem *port_regs = 168 qdev->mem_map_registers; 169 170 writel(((ISP_CONTROL_NP_MASK << 16) | page), 171 &port_regs->CommonRegs.ispControlStatus); 172 readl(&port_regs->CommonRegs.ispControlStatus); 173 qdev->current_page = page; 174 } 175 176 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) 177 { 178 u32 value; 179 unsigned long hw_flags; 180 181 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 182 value = readl(reg); 183 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 184 185 return value; 186 } 187 188 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) 189 { 190 return readl(reg); 191 } 192 193 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) 194 { 195 u32 value; 196 unsigned long hw_flags; 197 198 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 199 200 if (qdev->current_page != 0) 201 ql_set_register_page(qdev, 0); 202 value = readl(reg); 203 204 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 205 return value; 206 } 207 208 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) 209 { 210 if (qdev->current_page != 0) 211 ql_set_register_page(qdev, 0); 212 return readl(reg); 213 } 214 215 static void ql_write_common_reg_l(struct ql3_adapter *qdev, 216 u32 __iomem *reg, u32 value) 217 { 218 unsigned long hw_flags; 219 220 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 221 writel(value, reg); 222 readl(reg); 223 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 224 } 225 226 static void ql_write_common_reg(struct ql3_adapter *qdev, 227 u32 __iomem *reg, u32 value) 228 { 229 writel(value, reg); 230 readl(reg); 231 } 232 233 static void ql_write_nvram_reg(struct ql3_adapter *qdev, 234 u32 __iomem *reg, u32 value) 235 { 236 writel(value, reg); 237 readl(reg); 238 udelay(1); 239 } 240 241 static void ql_write_page0_reg(struct ql3_adapter *qdev, 242 u32 __iomem *reg, u32 value) 243 { 244 if (qdev->current_page != 0) 245 ql_set_register_page(qdev, 0); 246 writel(value, reg); 247 readl(reg); 248 } 249 250 /* 251 * Caller holds hw_lock. Only called during init. 252 */ 253 static void ql_write_page1_reg(struct ql3_adapter *qdev, 254 u32 __iomem *reg, u32 value) 255 { 256 if (qdev->current_page != 1) 257 ql_set_register_page(qdev, 1); 258 writel(value, reg); 259 readl(reg); 260 } 261 262 /* 263 * Caller holds hw_lock. Only called during init. 264 */ 265 static void ql_write_page2_reg(struct ql3_adapter *qdev, 266 u32 __iomem *reg, u32 value) 267 { 268 if (qdev->current_page != 2) 269 ql_set_register_page(qdev, 2); 270 writel(value, reg); 271 readl(reg); 272 } 273 274 static void ql_disable_interrupts(struct ql3_adapter *qdev) 275 { 276 struct ql3xxx_port_registers __iomem *port_regs = 277 qdev->mem_map_registers; 278 279 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, 280 (ISP_IMR_ENABLE_INT << 16)); 281 282 } 283 284 static void ql_enable_interrupts(struct ql3_adapter *qdev) 285 { 286 struct ql3xxx_port_registers __iomem *port_regs = 287 qdev->mem_map_registers; 288 289 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, 290 ((0xff << 16) | ISP_IMR_ENABLE_INT)); 291 292 } 293 294 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, 295 struct ql_rcv_buf_cb *lrg_buf_cb) 296 { 297 dma_addr_t map; 298 int err; 299 lrg_buf_cb->next = NULL; 300 301 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ 302 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; 303 } else { 304 qdev->lrg_buf_free_tail->next = lrg_buf_cb; 305 qdev->lrg_buf_free_tail = lrg_buf_cb; 306 } 307 308 if (!lrg_buf_cb->skb) { 309 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 310 qdev->lrg_buffer_len); 311 if (unlikely(!lrg_buf_cb->skb)) { 312 qdev->lrg_buf_skb_check++; 313 } else { 314 /* 315 * We save some space to copy the ethhdr from first 316 * buffer 317 */ 318 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); 319 map = pci_map_single(qdev->pdev, 320 lrg_buf_cb->skb->data, 321 qdev->lrg_buffer_len - 322 QL_HEADER_SPACE, 323 PCI_DMA_FROMDEVICE); 324 err = pci_dma_mapping_error(qdev->pdev, map); 325 if (err) { 326 netdev_err(qdev->ndev, 327 "PCI mapping failed with error: %d\n", 328 err); 329 dev_kfree_skb(lrg_buf_cb->skb); 330 lrg_buf_cb->skb = NULL; 331 332 qdev->lrg_buf_skb_check++; 333 return; 334 } 335 336 lrg_buf_cb->buf_phy_addr_low = 337 cpu_to_le32(LS_64BITS(map)); 338 lrg_buf_cb->buf_phy_addr_high = 339 cpu_to_le32(MS_64BITS(map)); 340 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 341 dma_unmap_len_set(lrg_buf_cb, maplen, 342 qdev->lrg_buffer_len - 343 QL_HEADER_SPACE); 344 } 345 } 346 347 qdev->lrg_buf_free_count++; 348 } 349 350 static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter 351 *qdev) 352 { 353 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 354 355 if (lrg_buf_cb != NULL) { 356 qdev->lrg_buf_free_head = lrg_buf_cb->next; 357 if (qdev->lrg_buf_free_head == NULL) 358 qdev->lrg_buf_free_tail = NULL; 359 qdev->lrg_buf_free_count--; 360 } 361 362 return lrg_buf_cb; 363 } 364 365 static u32 addrBits = EEPROM_NO_ADDR_BITS; 366 static u32 dataBits = EEPROM_NO_DATA_BITS; 367 368 static void fm93c56a_deselect(struct ql3_adapter *qdev); 369 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, 370 unsigned short *value); 371 372 /* 373 * Caller holds hw_lock. 374 */ 375 static void fm93c56a_select(struct ql3_adapter *qdev) 376 { 377 struct ql3xxx_port_registers __iomem *port_regs = 378 qdev->mem_map_registers; 379 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 380 381 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 382 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 383 } 384 385 /* 386 * Caller holds hw_lock. 387 */ 388 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) 389 { 390 int i; 391 u32 mask; 392 u32 dataBit; 393 u32 previousBit; 394 struct ql3xxx_port_registers __iomem *port_regs = 395 qdev->mem_map_registers; 396 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 397 398 /* Clock in a zero, then do the start bit */ 399 ql_write_nvram_reg(qdev, spir, 400 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 401 AUBURN_EEPROM_DO_1)); 402 ql_write_nvram_reg(qdev, spir, 403 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 404 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); 405 ql_write_nvram_reg(qdev, spir, 406 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 407 AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); 408 409 mask = 1 << (FM93C56A_CMD_BITS - 1); 410 /* Force the previous data bit to be different */ 411 previousBit = 0xffff; 412 for (i = 0; i < FM93C56A_CMD_BITS; i++) { 413 dataBit = (cmd & mask) 414 ? AUBURN_EEPROM_DO_1 415 : AUBURN_EEPROM_DO_0; 416 if (previousBit != dataBit) { 417 /* If the bit changed, change the DO state to match */ 418 ql_write_nvram_reg(qdev, spir, 419 (ISP_NVRAM_MASK | 420 qdev->eeprom_cmd_data | dataBit)); 421 previousBit = dataBit; 422 } 423 ql_write_nvram_reg(qdev, spir, 424 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 425 dataBit | AUBURN_EEPROM_CLK_RISE)); 426 ql_write_nvram_reg(qdev, spir, 427 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 428 dataBit | AUBURN_EEPROM_CLK_FALL)); 429 cmd = cmd << 1; 430 } 431 432 mask = 1 << (addrBits - 1); 433 /* Force the previous data bit to be different */ 434 previousBit = 0xffff; 435 for (i = 0; i < addrBits; i++) { 436 dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 437 : AUBURN_EEPROM_DO_0; 438 if (previousBit != dataBit) { 439 /* 440 * If the bit changed, then change the DO state to 441 * match 442 */ 443 ql_write_nvram_reg(qdev, spir, 444 (ISP_NVRAM_MASK | 445 qdev->eeprom_cmd_data | dataBit)); 446 previousBit = dataBit; 447 } 448 ql_write_nvram_reg(qdev, spir, 449 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 450 dataBit | AUBURN_EEPROM_CLK_RISE)); 451 ql_write_nvram_reg(qdev, spir, 452 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 453 dataBit | AUBURN_EEPROM_CLK_FALL)); 454 eepromAddr = eepromAddr << 1; 455 } 456 } 457 458 /* 459 * Caller holds hw_lock. 460 */ 461 static void fm93c56a_deselect(struct ql3_adapter *qdev) 462 { 463 struct ql3xxx_port_registers __iomem *port_regs = 464 qdev->mem_map_registers; 465 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 466 467 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 468 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 469 } 470 471 /* 472 * Caller holds hw_lock. 473 */ 474 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) 475 { 476 int i; 477 u32 data = 0; 478 u32 dataBit; 479 struct ql3xxx_port_registers __iomem *port_regs = 480 qdev->mem_map_registers; 481 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 482 483 /* Read the data bits */ 484 /* The first bit is a dummy. Clock right over it. */ 485 for (i = 0; i < dataBits; i++) { 486 ql_write_nvram_reg(qdev, spir, 487 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 488 AUBURN_EEPROM_CLK_RISE); 489 ql_write_nvram_reg(qdev, spir, 490 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 491 AUBURN_EEPROM_CLK_FALL); 492 dataBit = (ql_read_common_reg(qdev, spir) & 493 AUBURN_EEPROM_DI_1) ? 1 : 0; 494 data = (data << 1) | dataBit; 495 } 496 *value = (u16)data; 497 } 498 499 /* 500 * Caller holds hw_lock. 501 */ 502 static void eeprom_readword(struct ql3_adapter *qdev, 503 u32 eepromAddr, unsigned short *value) 504 { 505 fm93c56a_select(qdev); 506 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); 507 fm93c56a_datain(qdev, value); 508 fm93c56a_deselect(qdev); 509 } 510 511 static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) 512 { 513 __le16 *p = (__le16 *)ndev->dev_addr; 514 p[0] = cpu_to_le16(addr[0]); 515 p[1] = cpu_to_le16(addr[1]); 516 p[2] = cpu_to_le16(addr[2]); 517 } 518 519 static int ql_get_nvram_params(struct ql3_adapter *qdev) 520 { 521 u16 *pEEPROMData; 522 u16 checksum = 0; 523 u32 index; 524 unsigned long hw_flags; 525 526 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 527 528 pEEPROMData = (u16 *)&qdev->nvram_data; 529 qdev->eeprom_cmd_data = 0; 530 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, 531 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 532 2) << 10)) { 533 pr_err("%s: Failed ql_sem_spinlock()\n", __func__); 534 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 535 return -1; 536 } 537 538 for (index = 0; index < EEPROM_SIZE; index++) { 539 eeprom_readword(qdev, index, pEEPROMData); 540 checksum += *pEEPROMData; 541 pEEPROMData++; 542 } 543 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); 544 545 if (checksum != 0) { 546 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", 547 checksum); 548 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 549 return -1; 550 } 551 552 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 553 return checksum; 554 } 555 556 static const u32 PHYAddr[2] = { 557 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS 558 }; 559 560 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) 561 { 562 struct ql3xxx_port_registers __iomem *port_regs = 563 qdev->mem_map_registers; 564 u32 temp; 565 int count = 1000; 566 567 while (count) { 568 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); 569 if (!(temp & MAC_MII_STATUS_BSY)) 570 return 0; 571 udelay(10); 572 count--; 573 } 574 return -1; 575 } 576 577 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) 578 { 579 struct ql3xxx_port_registers __iomem *port_regs = 580 qdev->mem_map_registers; 581 u32 scanControl; 582 583 if (qdev->numPorts > 1) { 584 /* Auto scan will cycle through multiple ports */ 585 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; 586 } else { 587 scanControl = MAC_MII_CONTROL_SC; 588 } 589 590 /* 591 * Scan register 1 of PHY/PETBI, 592 * Set up to scan both devices 593 * The autoscan starts from the first register, completes 594 * the last one before rolling over to the first 595 */ 596 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 597 PHYAddr[0] | MII_SCAN_REGISTER); 598 599 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 600 (scanControl) | 601 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); 602 } 603 604 static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) 605 { 606 u8 ret; 607 struct ql3xxx_port_registers __iomem *port_regs = 608 qdev->mem_map_registers; 609 610 /* See if scan mode is enabled before we turn it off */ 611 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & 612 (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { 613 /* Scan is enabled */ 614 ret = 1; 615 } else { 616 /* Scan is disabled */ 617 ret = 0; 618 } 619 620 /* 621 * When disabling scan mode you must first change the MII register 622 * address 623 */ 624 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 625 PHYAddr[0] | MII_SCAN_REGISTER); 626 627 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 628 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | 629 MAC_MII_CONTROL_RC) << 16)); 630 631 return ret; 632 } 633 634 static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, 635 u16 regAddr, u16 value, u32 phyAddr) 636 { 637 struct ql3xxx_port_registers __iomem *port_regs = 638 qdev->mem_map_registers; 639 u8 scanWasEnabled; 640 641 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 642 643 if (ql_wait_for_mii_ready(qdev)) { 644 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 645 return -1; 646 } 647 648 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 649 phyAddr | regAddr); 650 651 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); 652 653 /* Wait for write to complete 9/10/04 SJP */ 654 if (ql_wait_for_mii_ready(qdev)) { 655 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 656 return -1; 657 } 658 659 if (scanWasEnabled) 660 ql_mii_enable_scan_mode(qdev); 661 662 return 0; 663 } 664 665 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, 666 u16 *value, u32 phyAddr) 667 { 668 struct ql3xxx_port_registers __iomem *port_regs = 669 qdev->mem_map_registers; 670 u8 scanWasEnabled; 671 u32 temp; 672 673 scanWasEnabled = ql_mii_disable_scan_mode(qdev); 674 675 if (ql_wait_for_mii_ready(qdev)) { 676 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 677 return -1; 678 } 679 680 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 681 phyAddr | regAddr); 682 683 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 684 (MAC_MII_CONTROL_RC << 16)); 685 686 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 687 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); 688 689 /* Wait for the read to complete */ 690 if (ql_wait_for_mii_ready(qdev)) { 691 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 692 return -1; 693 } 694 695 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); 696 *value = (u16) temp; 697 698 if (scanWasEnabled) 699 ql_mii_enable_scan_mode(qdev); 700 701 return 0; 702 } 703 704 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) 705 { 706 struct ql3xxx_port_registers __iomem *port_regs = 707 qdev->mem_map_registers; 708 709 ql_mii_disable_scan_mode(qdev); 710 711 if (ql_wait_for_mii_ready(qdev)) { 712 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 713 return -1; 714 } 715 716 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 717 qdev->PHYAddr | regAddr); 718 719 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); 720 721 /* Wait for write to complete. */ 722 if (ql_wait_for_mii_ready(qdev)) { 723 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 724 return -1; 725 } 726 727 ql_mii_enable_scan_mode(qdev); 728 729 return 0; 730 } 731 732 static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) 733 { 734 u32 temp; 735 struct ql3xxx_port_registers __iomem *port_regs = 736 qdev->mem_map_registers; 737 738 ql_mii_disable_scan_mode(qdev); 739 740 if (ql_wait_for_mii_ready(qdev)) { 741 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 742 return -1; 743 } 744 745 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 746 qdev->PHYAddr | regAddr); 747 748 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 749 (MAC_MII_CONTROL_RC << 16)); 750 751 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 752 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); 753 754 /* Wait for the read to complete */ 755 if (ql_wait_for_mii_ready(qdev)) { 756 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); 757 return -1; 758 } 759 760 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); 761 *value = (u16) temp; 762 763 ql_mii_enable_scan_mode(qdev); 764 765 return 0; 766 } 767 768 static void ql_petbi_reset(struct ql3_adapter *qdev) 769 { 770 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); 771 } 772 773 static void ql_petbi_start_neg(struct ql3_adapter *qdev) 774 { 775 u16 reg; 776 777 /* Enable Auto-negotiation sense */ 778 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, ®); 779 reg |= PETBI_TBI_AUTO_SENSE; 780 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); 781 782 ql_mii_write_reg(qdev, PETBI_NEG_ADVER, 783 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); 784 785 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, 786 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | 787 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); 788 789 } 790 791 static void ql_petbi_reset_ex(struct ql3_adapter *qdev) 792 { 793 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, 794 PHYAddr[qdev->mac_index]); 795 } 796 797 static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) 798 { 799 u16 reg; 800 801 /* Enable Auto-negotiation sense */ 802 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, ®, 803 PHYAddr[qdev->mac_index]); 804 reg |= PETBI_TBI_AUTO_SENSE; 805 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, 806 PHYAddr[qdev->mac_index]); 807 808 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, 809 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, 810 PHYAddr[qdev->mac_index]); 811 812 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, 813 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | 814 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, 815 PHYAddr[qdev->mac_index]); 816 } 817 818 static void ql_petbi_init(struct ql3_adapter *qdev) 819 { 820 ql_petbi_reset(qdev); 821 ql_petbi_start_neg(qdev); 822 } 823 824 static void ql_petbi_init_ex(struct ql3_adapter *qdev) 825 { 826 ql_petbi_reset_ex(qdev); 827 ql_petbi_start_neg_ex(qdev); 828 } 829 830 static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) 831 { 832 u16 reg; 833 834 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, ®) < 0) 835 return 0; 836 837 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; 838 } 839 840 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) 841 { 842 netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); 843 /* power down device bit 11 = 1 */ 844 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); 845 /* enable diagnostic mode bit 2 = 1 */ 846 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); 847 /* 1000MB amplitude adjust (see Agere errata) */ 848 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); 849 /* 1000MB amplitude adjust (see Agere errata) */ 850 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); 851 /* 100MB amplitude adjust (see Agere errata) */ 852 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); 853 /* 100MB amplitude adjust (see Agere errata) */ 854 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); 855 /* 10MB amplitude adjust (see Agere errata) */ 856 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); 857 /* 10MB amplitude adjust (see Agere errata) */ 858 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); 859 /* point to hidden reg 0x2806 */ 860 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); 861 /* Write new PHYAD w/bit 5 set */ 862 ql_mii_write_reg_ex(qdev, 0x11, 863 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); 864 /* 865 * Disable diagnostic mode bit 2 = 0 866 * Power up device bit 11 = 0 867 * Link up (on) and activity (blink) 868 */ 869 ql_mii_write_reg(qdev, 0x12, 0x840a); 870 ql_mii_write_reg(qdev, 0x00, 0x1140); 871 ql_mii_write_reg(qdev, 0x1c, 0xfaf0); 872 } 873 874 static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, 875 u16 phyIdReg0, u16 phyIdReg1) 876 { 877 enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; 878 u32 oui; 879 u16 model; 880 int i; 881 882 if (phyIdReg0 == 0xffff) 883 return result; 884 885 if (phyIdReg1 == 0xffff) 886 return result; 887 888 /* oui is split between two registers */ 889 oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); 890 891 model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; 892 893 /* Scan table for this PHY */ 894 for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { 895 if ((oui == PHY_DEVICES[i].phyIdOUI) && 896 (model == PHY_DEVICES[i].phyIdModel)) { 897 netdev_info(qdev->ndev, "Phy: %s\n", 898 PHY_DEVICES[i].name); 899 result = PHY_DEVICES[i].phyDevice; 900 break; 901 } 902 } 903 904 return result; 905 } 906 907 static int ql_phy_get_speed(struct ql3_adapter *qdev) 908 { 909 u16 reg; 910 911 switch (qdev->phyType) { 912 case PHY_AGERE_ET1011C: { 913 if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) 914 return 0; 915 916 reg = (reg >> 8) & 3; 917 break; 918 } 919 default: 920 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) 921 return 0; 922 923 reg = (((reg & 0x18) >> 3) & 3); 924 } 925 926 switch (reg) { 927 case 2: 928 return SPEED_1000; 929 case 1: 930 return SPEED_100; 931 case 0: 932 return SPEED_10; 933 default: 934 return -1; 935 } 936 } 937 938 static int ql_is_full_dup(struct ql3_adapter *qdev) 939 { 940 u16 reg; 941 942 switch (qdev->phyType) { 943 case PHY_AGERE_ET1011C: { 944 if (ql_mii_read_reg(qdev, 0x1A, ®)) 945 return 0; 946 947 return ((reg & 0x0080) && (reg & 0x1000)) != 0; 948 } 949 case PHY_VITESSE_VSC8211: 950 default: { 951 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) 952 return 0; 953 return (reg & PHY_AUX_DUPLEX_STAT) != 0; 954 } 955 } 956 } 957 958 static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) 959 { 960 u16 reg; 961 962 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, ®) < 0) 963 return 0; 964 965 return (reg & PHY_NEG_PAUSE) != 0; 966 } 967 968 static int PHY_Setup(struct ql3_adapter *qdev) 969 { 970 u16 reg1; 971 u16 reg2; 972 bool agereAddrChangeNeeded = false; 973 u32 miiAddr = 0; 974 int err; 975 976 /* Determine the PHY we are using by reading the ID's */ 977 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); 978 if (err != 0) { 979 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); 980 return err; 981 } 982 983 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); 984 if (err != 0) { 985 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); 986 return err; 987 } 988 989 /* Check if we have a Agere PHY */ 990 if ((reg1 == 0xffff) || (reg2 == 0xffff)) { 991 992 /* Determine which MII address we should be using 993 determined by the index of the card */ 994 if (qdev->mac_index == 0) 995 miiAddr = MII_AGERE_ADDR_1; 996 else 997 miiAddr = MII_AGERE_ADDR_2; 998 999 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); 1000 if (err != 0) { 1001 netdev_err(qdev->ndev, 1002 "Could not read from reg PHY_ID_0_REG after Agere detected\n"); 1003 return err; 1004 } 1005 1006 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); 1007 if (err != 0) { 1008 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); 1009 return err; 1010 } 1011 1012 /* We need to remember to initialize the Agere PHY */ 1013 agereAddrChangeNeeded = true; 1014 } 1015 1016 /* Determine the particular PHY we have on board to apply 1017 PHY specific initializations */ 1018 qdev->phyType = getPhyType(qdev, reg1, reg2); 1019 1020 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { 1021 /* need this here so address gets changed */ 1022 phyAgereSpecificInit(qdev, miiAddr); 1023 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { 1024 netdev_err(qdev->ndev, "PHY is unknown\n"); 1025 return -EIO; 1026 } 1027 1028 return 0; 1029 } 1030 1031 /* 1032 * Caller holds hw_lock. 1033 */ 1034 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) 1035 { 1036 struct ql3xxx_port_registers __iomem *port_regs = 1037 qdev->mem_map_registers; 1038 u32 value; 1039 1040 if (enable) 1041 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); 1042 else 1043 value = (MAC_CONFIG_REG_PE << 16); 1044 1045 if (qdev->mac_index) 1046 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1047 else 1048 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1049 } 1050 1051 /* 1052 * Caller holds hw_lock. 1053 */ 1054 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) 1055 { 1056 struct ql3xxx_port_registers __iomem *port_regs = 1057 qdev->mem_map_registers; 1058 u32 value; 1059 1060 if (enable) 1061 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); 1062 else 1063 value = (MAC_CONFIG_REG_SR << 16); 1064 1065 if (qdev->mac_index) 1066 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1067 else 1068 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1069 } 1070 1071 /* 1072 * Caller holds hw_lock. 1073 */ 1074 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) 1075 { 1076 struct ql3xxx_port_registers __iomem *port_regs = 1077 qdev->mem_map_registers; 1078 u32 value; 1079 1080 if (enable) 1081 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); 1082 else 1083 value = (MAC_CONFIG_REG_GM << 16); 1084 1085 if (qdev->mac_index) 1086 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1087 else 1088 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1089 } 1090 1091 /* 1092 * Caller holds hw_lock. 1093 */ 1094 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) 1095 { 1096 struct ql3xxx_port_registers __iomem *port_regs = 1097 qdev->mem_map_registers; 1098 u32 value; 1099 1100 if (enable) 1101 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); 1102 else 1103 value = (MAC_CONFIG_REG_FD << 16); 1104 1105 if (qdev->mac_index) 1106 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1107 else 1108 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1109 } 1110 1111 /* 1112 * Caller holds hw_lock. 1113 */ 1114 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) 1115 { 1116 struct ql3xxx_port_registers __iomem *port_regs = 1117 qdev->mem_map_registers; 1118 u32 value; 1119 1120 if (enable) 1121 value = 1122 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | 1123 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); 1124 else 1125 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); 1126 1127 if (qdev->mac_index) 1128 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1129 else 1130 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1131 } 1132 1133 /* 1134 * Caller holds hw_lock. 1135 */ 1136 static int ql_is_fiber(struct ql3_adapter *qdev) 1137 { 1138 struct ql3xxx_port_registers __iomem *port_regs = 1139 qdev->mem_map_registers; 1140 u32 bitToCheck = 0; 1141 u32 temp; 1142 1143 switch (qdev->mac_index) { 1144 case 0: 1145 bitToCheck = PORT_STATUS_SM0; 1146 break; 1147 case 1: 1148 bitToCheck = PORT_STATUS_SM1; 1149 break; 1150 } 1151 1152 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1153 return (temp & bitToCheck) != 0; 1154 } 1155 1156 static int ql_is_auto_cfg(struct ql3_adapter *qdev) 1157 { 1158 u16 reg; 1159 ql_mii_read_reg(qdev, 0x00, ®); 1160 return (reg & 0x1000) != 0; 1161 } 1162 1163 /* 1164 * Caller holds hw_lock. 1165 */ 1166 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) 1167 { 1168 struct ql3xxx_port_registers __iomem *port_regs = 1169 qdev->mem_map_registers; 1170 u32 bitToCheck = 0; 1171 u32 temp; 1172 1173 switch (qdev->mac_index) { 1174 case 0: 1175 bitToCheck = PORT_STATUS_AC0; 1176 break; 1177 case 1: 1178 bitToCheck = PORT_STATUS_AC1; 1179 break; 1180 } 1181 1182 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1183 if (temp & bitToCheck) { 1184 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); 1185 return 1; 1186 } 1187 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); 1188 return 0; 1189 } 1190 1191 /* 1192 * ql_is_neg_pause() returns 1 if pause was negotiated to be on 1193 */ 1194 static int ql_is_neg_pause(struct ql3_adapter *qdev) 1195 { 1196 if (ql_is_fiber(qdev)) 1197 return ql_is_petbi_neg_pause(qdev); 1198 else 1199 return ql_is_phy_neg_pause(qdev); 1200 } 1201 1202 static int ql_auto_neg_error(struct ql3_adapter *qdev) 1203 { 1204 struct ql3xxx_port_registers __iomem *port_regs = 1205 qdev->mem_map_registers; 1206 u32 bitToCheck = 0; 1207 u32 temp; 1208 1209 switch (qdev->mac_index) { 1210 case 0: 1211 bitToCheck = PORT_STATUS_AE0; 1212 break; 1213 case 1: 1214 bitToCheck = PORT_STATUS_AE1; 1215 break; 1216 } 1217 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1218 return (temp & bitToCheck) != 0; 1219 } 1220 1221 static u32 ql_get_link_speed(struct ql3_adapter *qdev) 1222 { 1223 if (ql_is_fiber(qdev)) 1224 return SPEED_1000; 1225 else 1226 return ql_phy_get_speed(qdev); 1227 } 1228 1229 static int ql_is_link_full_dup(struct ql3_adapter *qdev) 1230 { 1231 if (ql_is_fiber(qdev)) 1232 return 1; 1233 else 1234 return ql_is_full_dup(qdev); 1235 } 1236 1237 /* 1238 * Caller holds hw_lock. 1239 */ 1240 static int ql_link_down_detect(struct ql3_adapter *qdev) 1241 { 1242 struct ql3xxx_port_registers __iomem *port_regs = 1243 qdev->mem_map_registers; 1244 u32 bitToCheck = 0; 1245 u32 temp; 1246 1247 switch (qdev->mac_index) { 1248 case 0: 1249 bitToCheck = ISP_CONTROL_LINK_DN_0; 1250 break; 1251 case 1: 1252 bitToCheck = ISP_CONTROL_LINK_DN_1; 1253 break; 1254 } 1255 1256 temp = 1257 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 1258 return (temp & bitToCheck) != 0; 1259 } 1260 1261 /* 1262 * Caller holds hw_lock. 1263 */ 1264 static int ql_link_down_detect_clear(struct ql3_adapter *qdev) 1265 { 1266 struct ql3xxx_port_registers __iomem *port_regs = 1267 qdev->mem_map_registers; 1268 1269 switch (qdev->mac_index) { 1270 case 0: 1271 ql_write_common_reg(qdev, 1272 &port_regs->CommonRegs.ispControlStatus, 1273 (ISP_CONTROL_LINK_DN_0) | 1274 (ISP_CONTROL_LINK_DN_0 << 16)); 1275 break; 1276 1277 case 1: 1278 ql_write_common_reg(qdev, 1279 &port_regs->CommonRegs.ispControlStatus, 1280 (ISP_CONTROL_LINK_DN_1) | 1281 (ISP_CONTROL_LINK_DN_1 << 16)); 1282 break; 1283 1284 default: 1285 return 1; 1286 } 1287 1288 return 0; 1289 } 1290 1291 /* 1292 * Caller holds hw_lock. 1293 */ 1294 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) 1295 { 1296 struct ql3xxx_port_registers __iomem *port_regs = 1297 qdev->mem_map_registers; 1298 u32 bitToCheck = 0; 1299 u32 temp; 1300 1301 switch (qdev->mac_index) { 1302 case 0: 1303 bitToCheck = PORT_STATUS_F1_ENABLED; 1304 break; 1305 case 1: 1306 bitToCheck = PORT_STATUS_F3_ENABLED; 1307 break; 1308 default: 1309 break; 1310 } 1311 1312 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1313 if (temp & bitToCheck) { 1314 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, 1315 "not link master\n"); 1316 return 0; 1317 } 1318 1319 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); 1320 return 1; 1321 } 1322 1323 static void ql_phy_reset_ex(struct ql3_adapter *qdev) 1324 { 1325 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, 1326 PHYAddr[qdev->mac_index]); 1327 } 1328 1329 static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) 1330 { 1331 u16 reg; 1332 u16 portConfiguration; 1333 1334 if (qdev->phyType == PHY_AGERE_ET1011C) 1335 ql_mii_write_reg(qdev, 0x13, 0x0000); 1336 /* turn off external loopback */ 1337 1338 if (qdev->mac_index == 0) 1339 portConfiguration = 1340 qdev->nvram_data.macCfg_port0.portConfiguration; 1341 else 1342 portConfiguration = 1343 qdev->nvram_data.macCfg_port1.portConfiguration; 1344 1345 /* Some HBA's in the field are set to 0 and they need to 1346 be reinterpreted with a default value */ 1347 if (portConfiguration == 0) 1348 portConfiguration = PORT_CONFIG_DEFAULT; 1349 1350 /* Set the 1000 advertisements */ 1351 ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, ®, 1352 PHYAddr[qdev->mac_index]); 1353 reg &= ~PHY_GIG_ALL_PARAMS; 1354 1355 if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { 1356 if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) 1357 reg |= PHY_GIG_ADV_1000F; 1358 else 1359 reg |= PHY_GIG_ADV_1000H; 1360 } 1361 1362 ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, 1363 PHYAddr[qdev->mac_index]); 1364 1365 /* Set the 10/100 & pause negotiation advertisements */ 1366 ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, ®, 1367 PHYAddr[qdev->mac_index]); 1368 reg &= ~PHY_NEG_ALL_PARAMS; 1369 1370 if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) 1371 reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; 1372 1373 if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { 1374 if (portConfiguration & PORT_CONFIG_100MB_SPEED) 1375 reg |= PHY_NEG_ADV_100F; 1376 1377 if (portConfiguration & PORT_CONFIG_10MB_SPEED) 1378 reg |= PHY_NEG_ADV_10F; 1379 } 1380 1381 if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { 1382 if (portConfiguration & PORT_CONFIG_100MB_SPEED) 1383 reg |= PHY_NEG_ADV_100H; 1384 1385 if (portConfiguration & PORT_CONFIG_10MB_SPEED) 1386 reg |= PHY_NEG_ADV_10H; 1387 } 1388 1389 if (portConfiguration & PORT_CONFIG_1000MB_SPEED) 1390 reg |= 1; 1391 1392 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, 1393 PHYAddr[qdev->mac_index]); 1394 1395 ql_mii_read_reg_ex(qdev, CONTROL_REG, ®, PHYAddr[qdev->mac_index]); 1396 1397 ql_mii_write_reg_ex(qdev, CONTROL_REG, 1398 reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, 1399 PHYAddr[qdev->mac_index]); 1400 } 1401 1402 static void ql_phy_init_ex(struct ql3_adapter *qdev) 1403 { 1404 ql_phy_reset_ex(qdev); 1405 PHY_Setup(qdev); 1406 ql_phy_start_neg_ex(qdev); 1407 } 1408 1409 /* 1410 * Caller holds hw_lock. 1411 */ 1412 static u32 ql_get_link_state(struct ql3_adapter *qdev) 1413 { 1414 struct ql3xxx_port_registers __iomem *port_regs = 1415 qdev->mem_map_registers; 1416 u32 bitToCheck = 0; 1417 u32 temp, linkState; 1418 1419 switch (qdev->mac_index) { 1420 case 0: 1421 bitToCheck = PORT_STATUS_UP0; 1422 break; 1423 case 1: 1424 bitToCheck = PORT_STATUS_UP1; 1425 break; 1426 } 1427 1428 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1429 if (temp & bitToCheck) 1430 linkState = LS_UP; 1431 else 1432 linkState = LS_DOWN; 1433 1434 return linkState; 1435 } 1436 1437 static int ql_port_start(struct ql3_adapter *qdev) 1438 { 1439 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1440 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1441 2) << 7)) { 1442 netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); 1443 return -1; 1444 } 1445 1446 if (ql_is_fiber(qdev)) { 1447 ql_petbi_init(qdev); 1448 } else { 1449 /* Copper port */ 1450 ql_phy_init_ex(qdev); 1451 } 1452 1453 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1454 return 0; 1455 } 1456 1457 static int ql_finish_auto_neg(struct ql3_adapter *qdev) 1458 { 1459 1460 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1461 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1462 2) << 7)) 1463 return -1; 1464 1465 if (!ql_auto_neg_error(qdev)) { 1466 if (test_bit(QL_LINK_MASTER, &qdev->flags)) { 1467 /* configure the MAC */ 1468 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, 1469 "Configuring link\n"); 1470 ql_mac_cfg_soft_reset(qdev, 1); 1471 ql_mac_cfg_gig(qdev, 1472 (ql_get_link_speed 1473 (qdev) == 1474 SPEED_1000)); 1475 ql_mac_cfg_full_dup(qdev, 1476 ql_is_link_full_dup 1477 (qdev)); 1478 ql_mac_cfg_pause(qdev, 1479 ql_is_neg_pause 1480 (qdev)); 1481 ql_mac_cfg_soft_reset(qdev, 0); 1482 1483 /* enable the MAC */ 1484 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, 1485 "Enabling mac\n"); 1486 ql_mac_enable(qdev, 1); 1487 } 1488 1489 qdev->port_link_state = LS_UP; 1490 netif_start_queue(qdev->ndev); 1491 netif_carrier_on(qdev->ndev); 1492 netif_info(qdev, link, qdev->ndev, 1493 "Link is up at %d Mbps, %s duplex\n", 1494 ql_get_link_speed(qdev), 1495 ql_is_link_full_dup(qdev) ? "full" : "half"); 1496 1497 } else { /* Remote error detected */ 1498 1499 if (test_bit(QL_LINK_MASTER, &qdev->flags)) { 1500 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, 1501 "Remote error detected. Calling ql_port_start()\n"); 1502 /* 1503 * ql_port_start() is shared code and needs 1504 * to lock the PHY on it's own. 1505 */ 1506 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1507 if (ql_port_start(qdev)) /* Restart port */ 1508 return -1; 1509 return 0; 1510 } 1511 } 1512 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1513 return 0; 1514 } 1515 1516 static void ql_link_state_machine_work(struct work_struct *work) 1517 { 1518 struct ql3_adapter *qdev = 1519 container_of(work, struct ql3_adapter, link_state_work.work); 1520 1521 u32 curr_link_state; 1522 unsigned long hw_flags; 1523 1524 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1525 1526 curr_link_state = ql_get_link_state(qdev); 1527 1528 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { 1529 netif_info(qdev, link, qdev->ndev, 1530 "Reset in progress, skip processing link state\n"); 1531 1532 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1533 1534 /* Restart timer on 2 second interval. */ 1535 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 1536 1537 return; 1538 } 1539 1540 switch (qdev->port_link_state) { 1541 default: 1542 if (test_bit(QL_LINK_MASTER, &qdev->flags)) 1543 ql_port_start(qdev); 1544 qdev->port_link_state = LS_DOWN; 1545 /* Fall Through */ 1546 1547 case LS_DOWN: 1548 if (curr_link_state == LS_UP) { 1549 netif_info(qdev, link, qdev->ndev, "Link is up\n"); 1550 if (ql_is_auto_neg_complete(qdev)) 1551 ql_finish_auto_neg(qdev); 1552 1553 if (qdev->port_link_state == LS_UP) 1554 ql_link_down_detect_clear(qdev); 1555 1556 qdev->port_link_state = LS_UP; 1557 } 1558 break; 1559 1560 case LS_UP: 1561 /* 1562 * See if the link is currently down or went down and came 1563 * back up 1564 */ 1565 if (curr_link_state == LS_DOWN) { 1566 netif_info(qdev, link, qdev->ndev, "Link is down\n"); 1567 qdev->port_link_state = LS_DOWN; 1568 } 1569 if (ql_link_down_detect(qdev)) 1570 qdev->port_link_state = LS_DOWN; 1571 break; 1572 } 1573 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1574 1575 /* Restart timer on 2 second interval. */ 1576 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 1577 } 1578 1579 /* 1580 * Caller must take hw_lock and QL_PHY_GIO_SEM. 1581 */ 1582 static void ql_get_phy_owner(struct ql3_adapter *qdev) 1583 { 1584 if (ql_this_adapter_controls_port(qdev)) 1585 set_bit(QL_LINK_MASTER, &qdev->flags); 1586 else 1587 clear_bit(QL_LINK_MASTER, &qdev->flags); 1588 } 1589 1590 /* 1591 * Caller must take hw_lock and QL_PHY_GIO_SEM. 1592 */ 1593 static void ql_init_scan_mode(struct ql3_adapter *qdev) 1594 { 1595 ql_mii_enable_scan_mode(qdev); 1596 1597 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { 1598 if (ql_this_adapter_controls_port(qdev)) 1599 ql_petbi_init_ex(qdev); 1600 } else { 1601 if (ql_this_adapter_controls_port(qdev)) 1602 ql_phy_init_ex(qdev); 1603 } 1604 } 1605 1606 /* 1607 * MII_Setup needs to be called before taking the PHY out of reset 1608 * so that the management interface clock speed can be set properly. 1609 * It would be better if we had a way to disable MDC until after the 1610 * PHY is out of reset, but we don't have that capability. 1611 */ 1612 static int ql_mii_setup(struct ql3_adapter *qdev) 1613 { 1614 u32 reg; 1615 struct ql3xxx_port_registers __iomem *port_regs = 1616 qdev->mem_map_registers; 1617 1618 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1619 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1620 2) << 7)) 1621 return -1; 1622 1623 if (qdev->device_id == QL3032_DEVICE_ID) 1624 ql_write_page0_reg(qdev, 1625 &port_regs->macMIIMgmtControlReg, 0x0f00000); 1626 1627 /* Divide 125MHz clock by 28 to meet PHY timing requirements */ 1628 reg = MAC_MII_CONTROL_CLK_SEL_DIV28; 1629 1630 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 1631 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); 1632 1633 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1634 return 0; 1635 } 1636 1637 #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ 1638 SUPPORTED_FIBRE | \ 1639 SUPPORTED_Autoneg) 1640 #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ 1641 SUPPORTED_10baseT_Full | \ 1642 SUPPORTED_100baseT_Half | \ 1643 SUPPORTED_100baseT_Full | \ 1644 SUPPORTED_1000baseT_Half | \ 1645 SUPPORTED_1000baseT_Full | \ 1646 SUPPORTED_Autoneg | \ 1647 SUPPORTED_TP) \ 1648 1649 static u32 ql_supported_modes(struct ql3_adapter *qdev) 1650 { 1651 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) 1652 return SUPPORTED_OPTICAL_MODES; 1653 1654 return SUPPORTED_TP_MODES; 1655 } 1656 1657 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) 1658 { 1659 int status; 1660 unsigned long hw_flags; 1661 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1662 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1663 (QL_RESOURCE_BITS_BASE_CODE | 1664 (qdev->mac_index) * 2) << 7)) { 1665 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1666 return 0; 1667 } 1668 status = ql_is_auto_cfg(qdev); 1669 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1670 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1671 return status; 1672 } 1673 1674 static u32 ql_get_speed(struct ql3_adapter *qdev) 1675 { 1676 u32 status; 1677 unsigned long hw_flags; 1678 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1679 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1680 (QL_RESOURCE_BITS_BASE_CODE | 1681 (qdev->mac_index) * 2) << 7)) { 1682 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1683 return 0; 1684 } 1685 status = ql_get_link_speed(qdev); 1686 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1687 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1688 return status; 1689 } 1690 1691 static int ql_get_full_dup(struct ql3_adapter *qdev) 1692 { 1693 int status; 1694 unsigned long hw_flags; 1695 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1696 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1697 (QL_RESOURCE_BITS_BASE_CODE | 1698 (qdev->mac_index) * 2) << 7)) { 1699 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1700 return 0; 1701 } 1702 status = ql_is_link_full_dup(qdev); 1703 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1705 return status; 1706 } 1707 1708 static int ql_get_link_ksettings(struct net_device *ndev, 1709 struct ethtool_link_ksettings *cmd) 1710 { 1711 struct ql3_adapter *qdev = netdev_priv(ndev); 1712 u32 supported, advertising; 1713 1714 supported = ql_supported_modes(qdev); 1715 1716 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { 1717 cmd->base.port = PORT_FIBRE; 1718 } else { 1719 cmd->base.port = PORT_TP; 1720 cmd->base.phy_address = qdev->PHYAddr; 1721 } 1722 advertising = ql_supported_modes(qdev); 1723 cmd->base.autoneg = ql_get_auto_cfg_status(qdev); 1724 cmd->base.speed = ql_get_speed(qdev); 1725 cmd->base.duplex = ql_get_full_dup(qdev); 1726 1727 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 1728 supported); 1729 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 1730 advertising); 1731 1732 return 0; 1733 } 1734 1735 static void ql_get_drvinfo(struct net_device *ndev, 1736 struct ethtool_drvinfo *drvinfo) 1737 { 1738 struct ql3_adapter *qdev = netdev_priv(ndev); 1739 strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); 1740 strlcpy(drvinfo->version, ql3xxx_driver_version, 1741 sizeof(drvinfo->version)); 1742 strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), 1743 sizeof(drvinfo->bus_info)); 1744 } 1745 1746 static u32 ql_get_msglevel(struct net_device *ndev) 1747 { 1748 struct ql3_adapter *qdev = netdev_priv(ndev); 1749 return qdev->msg_enable; 1750 } 1751 1752 static void ql_set_msglevel(struct net_device *ndev, u32 value) 1753 { 1754 struct ql3_adapter *qdev = netdev_priv(ndev); 1755 qdev->msg_enable = value; 1756 } 1757 1758 static void ql_get_pauseparam(struct net_device *ndev, 1759 struct ethtool_pauseparam *pause) 1760 { 1761 struct ql3_adapter *qdev = netdev_priv(ndev); 1762 struct ql3xxx_port_registers __iomem *port_regs = 1763 qdev->mem_map_registers; 1764 1765 u32 reg; 1766 if (qdev->mac_index == 0) 1767 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); 1768 else 1769 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); 1770 1771 pause->autoneg = ql_get_auto_cfg_status(qdev); 1772 pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; 1773 pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; 1774 } 1775 1776 static const struct ethtool_ops ql3xxx_ethtool_ops = { 1777 .get_drvinfo = ql_get_drvinfo, 1778 .get_link = ethtool_op_get_link, 1779 .get_msglevel = ql_get_msglevel, 1780 .set_msglevel = ql_set_msglevel, 1781 .get_pauseparam = ql_get_pauseparam, 1782 .get_link_ksettings = ql_get_link_ksettings, 1783 }; 1784 1785 static int ql_populate_free_queue(struct ql3_adapter *qdev) 1786 { 1787 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 1788 dma_addr_t map; 1789 int err; 1790 1791 while (lrg_buf_cb) { 1792 if (!lrg_buf_cb->skb) { 1793 lrg_buf_cb->skb = 1794 netdev_alloc_skb(qdev->ndev, 1795 qdev->lrg_buffer_len); 1796 if (unlikely(!lrg_buf_cb->skb)) { 1797 netdev_printk(KERN_DEBUG, qdev->ndev, 1798 "Failed netdev_alloc_skb()\n"); 1799 break; 1800 } else { 1801 /* 1802 * We save some space to copy the ethhdr from 1803 * first buffer 1804 */ 1805 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); 1806 map = pci_map_single(qdev->pdev, 1807 lrg_buf_cb->skb->data, 1808 qdev->lrg_buffer_len - 1809 QL_HEADER_SPACE, 1810 PCI_DMA_FROMDEVICE); 1811 1812 err = pci_dma_mapping_error(qdev->pdev, map); 1813 if (err) { 1814 netdev_err(qdev->ndev, 1815 "PCI mapping failed with error: %d\n", 1816 err); 1817 dev_kfree_skb(lrg_buf_cb->skb); 1818 lrg_buf_cb->skb = NULL; 1819 break; 1820 } 1821 1822 1823 lrg_buf_cb->buf_phy_addr_low = 1824 cpu_to_le32(LS_64BITS(map)); 1825 lrg_buf_cb->buf_phy_addr_high = 1826 cpu_to_le32(MS_64BITS(map)); 1827 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 1828 dma_unmap_len_set(lrg_buf_cb, maplen, 1829 qdev->lrg_buffer_len - 1830 QL_HEADER_SPACE); 1831 --qdev->lrg_buf_skb_check; 1832 if (!qdev->lrg_buf_skb_check) 1833 return 1; 1834 } 1835 } 1836 lrg_buf_cb = lrg_buf_cb->next; 1837 } 1838 return 0; 1839 } 1840 1841 /* 1842 * Caller holds hw_lock. 1843 */ 1844 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) 1845 { 1846 struct ql3xxx_port_registers __iomem *port_regs = 1847 qdev->mem_map_registers; 1848 1849 if (qdev->small_buf_release_cnt >= 16) { 1850 while (qdev->small_buf_release_cnt >= 16) { 1851 qdev->small_buf_q_producer_index++; 1852 1853 if (qdev->small_buf_q_producer_index == 1854 NUM_SBUFQ_ENTRIES) 1855 qdev->small_buf_q_producer_index = 0; 1856 qdev->small_buf_release_cnt -= 8; 1857 } 1858 wmb(); 1859 writel_relaxed(qdev->small_buf_q_producer_index, 1860 &port_regs->CommonRegs.rxSmallQProducerIndex); 1861 } 1862 } 1863 1864 /* 1865 * Caller holds hw_lock. 1866 */ 1867 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) 1868 { 1869 struct bufq_addr_element *lrg_buf_q_ele; 1870 int i; 1871 struct ql_rcv_buf_cb *lrg_buf_cb; 1872 struct ql3xxx_port_registers __iomem *port_regs = 1873 qdev->mem_map_registers; 1874 1875 if ((qdev->lrg_buf_free_count >= 8) && 1876 (qdev->lrg_buf_release_cnt >= 16)) { 1877 1878 if (qdev->lrg_buf_skb_check) 1879 if (!ql_populate_free_queue(qdev)) 1880 return; 1881 1882 lrg_buf_q_ele = qdev->lrg_buf_next_free; 1883 1884 while ((qdev->lrg_buf_release_cnt >= 16) && 1885 (qdev->lrg_buf_free_count >= 8)) { 1886 1887 for (i = 0; i < 8; i++) { 1888 lrg_buf_cb = 1889 ql_get_from_lrg_buf_free_list(qdev); 1890 lrg_buf_q_ele->addr_high = 1891 lrg_buf_cb->buf_phy_addr_high; 1892 lrg_buf_q_ele->addr_low = 1893 lrg_buf_cb->buf_phy_addr_low; 1894 lrg_buf_q_ele++; 1895 1896 qdev->lrg_buf_release_cnt--; 1897 } 1898 1899 qdev->lrg_buf_q_producer_index++; 1900 1901 if (qdev->lrg_buf_q_producer_index == 1902 qdev->num_lbufq_entries) 1903 qdev->lrg_buf_q_producer_index = 0; 1904 1905 if (qdev->lrg_buf_q_producer_index == 1906 (qdev->num_lbufq_entries - 1)) { 1907 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; 1908 } 1909 } 1910 wmb(); 1911 qdev->lrg_buf_next_free = lrg_buf_q_ele; 1912 writel(qdev->lrg_buf_q_producer_index, 1913 &port_regs->CommonRegs.rxLargeQProducerIndex); 1914 } 1915 } 1916 1917 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, 1918 struct ob_mac_iocb_rsp *mac_rsp) 1919 { 1920 struct ql_tx_buf_cb *tx_cb; 1921 int i; 1922 1923 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1924 netdev_warn(qdev->ndev, 1925 "Frame too short but it was padded and sent\n"); 1926 } 1927 1928 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1929 1930 /* Check the transmit response flags for any errors */ 1931 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1932 netdev_err(qdev->ndev, 1933 "Frame too short to be legal, frame not sent\n"); 1934 1935 qdev->ndev->stats.tx_errors++; 1936 goto frame_not_sent; 1937 } 1938 1939 if (tx_cb->seg_count == 0) { 1940 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", 1941 mac_rsp->transaction_id); 1942 1943 qdev->ndev->stats.tx_errors++; 1944 goto invalid_seg_count; 1945 } 1946 1947 pci_unmap_single(qdev->pdev, 1948 dma_unmap_addr(&tx_cb->map[0], mapaddr), 1949 dma_unmap_len(&tx_cb->map[0], maplen), 1950 PCI_DMA_TODEVICE); 1951 tx_cb->seg_count--; 1952 if (tx_cb->seg_count) { 1953 for (i = 1; i < tx_cb->seg_count; i++) { 1954 pci_unmap_page(qdev->pdev, 1955 dma_unmap_addr(&tx_cb->map[i], 1956 mapaddr), 1957 dma_unmap_len(&tx_cb->map[i], maplen), 1958 PCI_DMA_TODEVICE); 1959 } 1960 } 1961 qdev->ndev->stats.tx_packets++; 1962 qdev->ndev->stats.tx_bytes += tx_cb->skb->len; 1963 1964 frame_not_sent: 1965 dev_kfree_skb_irq(tx_cb->skb); 1966 tx_cb->skb = NULL; 1967 1968 invalid_seg_count: 1969 atomic_inc(&qdev->tx_count); 1970 } 1971 1972 static void ql_get_sbuf(struct ql3_adapter *qdev) 1973 { 1974 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) 1975 qdev->small_buf_index = 0; 1976 qdev->small_buf_release_cnt++; 1977 } 1978 1979 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) 1980 { 1981 struct ql_rcv_buf_cb *lrg_buf_cb = NULL; 1982 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; 1983 qdev->lrg_buf_release_cnt++; 1984 if (++qdev->lrg_buf_index == qdev->num_large_buffers) 1985 qdev->lrg_buf_index = 0; 1986 return lrg_buf_cb; 1987 } 1988 1989 /* 1990 * The difference between 3022 and 3032 for inbound completions: 1991 * 3022 uses two buffers per completion. The first buffer contains 1992 * (some) header info, the second the remainder of the headers plus 1993 * the data. For this chip we reserve some space at the top of the 1994 * receive buffer so that the header info in buffer one can be 1995 * prepended to the buffer two. Buffer two is the sent up while 1996 * buffer one is returned to the hardware to be reused. 1997 * 3032 receives all of it's data and headers in one buffer for a 1998 * simpler process. 3032 also supports checksum verification as 1999 * can be seen in ql_process_macip_rx_intr(). 2000 */ 2001 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, 2002 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) 2003 { 2004 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 2005 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 2006 struct sk_buff *skb; 2007 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); 2008 2009 /* 2010 * Get the inbound address list (small buffer). 2011 */ 2012 ql_get_sbuf(qdev); 2013 2014 if (qdev->device_id == QL3022_DEVICE_ID) 2015 lrg_buf_cb1 = ql_get_lbuf(qdev); 2016 2017 /* start of second buffer */ 2018 lrg_buf_cb2 = ql_get_lbuf(qdev); 2019 skb = lrg_buf_cb2->skb; 2020 2021 qdev->ndev->stats.rx_packets++; 2022 qdev->ndev->stats.rx_bytes += length; 2023 2024 skb_put(skb, length); 2025 pci_unmap_single(qdev->pdev, 2026 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2027 dma_unmap_len(lrg_buf_cb2, maplen), 2028 PCI_DMA_FROMDEVICE); 2029 prefetch(skb->data); 2030 skb_checksum_none_assert(skb); 2031 skb->protocol = eth_type_trans(skb, qdev->ndev); 2032 2033 napi_gro_receive(&qdev->napi, skb); 2034 lrg_buf_cb2->skb = NULL; 2035 2036 if (qdev->device_id == QL3022_DEVICE_ID) 2037 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2038 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2039 } 2040 2041 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, 2042 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) 2043 { 2044 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 2045 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 2046 struct sk_buff *skb1 = NULL, *skb2; 2047 struct net_device *ndev = qdev->ndev; 2048 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); 2049 u16 size = 0; 2050 2051 /* 2052 * Get the inbound address list (small buffer). 2053 */ 2054 2055 ql_get_sbuf(qdev); 2056 2057 if (qdev->device_id == QL3022_DEVICE_ID) { 2058 /* start of first buffer on 3022 */ 2059 lrg_buf_cb1 = ql_get_lbuf(qdev); 2060 skb1 = lrg_buf_cb1->skb; 2061 size = ETH_HLEN; 2062 if (*((u16 *) skb1->data) != 0xFFFF) 2063 size += VLAN_ETH_HLEN - ETH_HLEN; 2064 } 2065 2066 /* start of second buffer */ 2067 lrg_buf_cb2 = ql_get_lbuf(qdev); 2068 skb2 = lrg_buf_cb2->skb; 2069 2070 skb_put(skb2, length); /* Just the second buffer length here. */ 2071 pci_unmap_single(qdev->pdev, 2072 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2073 dma_unmap_len(lrg_buf_cb2, maplen), 2074 PCI_DMA_FROMDEVICE); 2075 prefetch(skb2->data); 2076 2077 skb_checksum_none_assert(skb2); 2078 if (qdev->device_id == QL3022_DEVICE_ID) { 2079 /* 2080 * Copy the ethhdr from first buffer to second. This 2081 * is necessary for 3022 IP completions. 2082 */ 2083 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2084 skb_push(skb2, size), size); 2085 } else { 2086 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2087 if (checksum & 2088 (IB_IP_IOCB_RSP_3032_ICE | 2089 IB_IP_IOCB_RSP_3032_CE)) { 2090 netdev_err(ndev, 2091 "%s: Bad checksum for this %s packet, checksum = %x\n", 2092 __func__, 2093 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2094 "TCP" : "UDP"), checksum); 2095 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2096 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2097 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2098 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2099 } 2100 } 2101 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2102 2103 napi_gro_receive(&qdev->napi, skb2); 2104 ndev->stats.rx_packets++; 2105 ndev->stats.rx_bytes += length; 2106 lrg_buf_cb2->skb = NULL; 2107 2108 if (qdev->device_id == QL3022_DEVICE_ID) 2109 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2110 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2111 } 2112 2113 static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) 2114 { 2115 struct net_rsp_iocb *net_rsp; 2116 struct net_device *ndev = qdev->ndev; 2117 int work_done = 0; 2118 2119 /* While there are entries in the completion queue. */ 2120 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2121 qdev->rsp_consumer_index) && (work_done < budget)) { 2122 2123 net_rsp = qdev->rsp_current; 2124 rmb(); 2125 /* 2126 * Fix 4032 chip's undocumented "feature" where bit-8 is set 2127 * if the inbound completion is for a VLAN. 2128 */ 2129 if (qdev->device_id == QL3032_DEVICE_ID) 2130 net_rsp->opcode &= 0x7f; 2131 switch (net_rsp->opcode) { 2132 2133 case OPCODE_OB_MAC_IOCB_FN0: 2134 case OPCODE_OB_MAC_IOCB_FN2: 2135 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) 2136 net_rsp); 2137 break; 2138 2139 case OPCODE_IB_MAC_IOCB: 2140 case OPCODE_IB_3032_MAC_IOCB: 2141 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 2142 net_rsp); 2143 work_done++; 2144 break; 2145 2146 case OPCODE_IB_IP_IOCB: 2147 case OPCODE_IB_3032_IP_IOCB: 2148 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 2149 net_rsp); 2150 work_done++; 2151 break; 2152 default: { 2153 u32 *tmp = (u32 *)net_rsp; 2154 netdev_err(ndev, 2155 "Hit default case, not handled!\n" 2156 " dropping the packet, opcode = %x\n" 2157 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2158 net_rsp->opcode, 2159 (unsigned long int)tmp[0], 2160 (unsigned long int)tmp[1], 2161 (unsigned long int)tmp[2], 2162 (unsigned long int)tmp[3]); 2163 } 2164 } 2165 2166 qdev->rsp_consumer_index++; 2167 2168 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { 2169 qdev->rsp_consumer_index = 0; 2170 qdev->rsp_current = qdev->rsp_q_virt_addr; 2171 } else { 2172 qdev->rsp_current++; 2173 } 2174 2175 } 2176 2177 return work_done; 2178 } 2179 2180 static int ql_poll(struct napi_struct *napi, int budget) 2181 { 2182 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2183 struct ql3xxx_port_registers __iomem *port_regs = 2184 qdev->mem_map_registers; 2185 int work_done; 2186 2187 work_done = ql_tx_rx_clean(qdev, budget); 2188 2189 if (work_done < budget && napi_complete_done(napi, work_done)) { 2190 unsigned long flags; 2191 2192 spin_lock_irqsave(&qdev->hw_lock, flags); 2193 ql_update_small_bufq_prod_index(qdev); 2194 ql_update_lrg_bufq_prod_index(qdev); 2195 writel(qdev->rsp_consumer_index, 2196 &port_regs->CommonRegs.rspQConsumerIndex); 2197 spin_unlock_irqrestore(&qdev->hw_lock, flags); 2198 2199 ql_enable_interrupts(qdev); 2200 } 2201 return work_done; 2202 } 2203 2204 static irqreturn_t ql3xxx_isr(int irq, void *dev_id) 2205 { 2206 2207 struct net_device *ndev = dev_id; 2208 struct ql3_adapter *qdev = netdev_priv(ndev); 2209 struct ql3xxx_port_registers __iomem *port_regs = 2210 qdev->mem_map_registers; 2211 u32 value; 2212 int handled = 1; 2213 u32 var; 2214 2215 value = ql_read_common_reg_l(qdev, 2216 &port_regs->CommonRegs.ispControlStatus); 2217 2218 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2219 spin_lock(&qdev->adapter_lock); 2220 netif_stop_queue(qdev->ndev); 2221 netif_carrier_off(qdev->ndev); 2222 ql_disable_interrupts(qdev); 2223 qdev->port_link_state = LS_DOWN; 2224 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; 2225 2226 if (value & ISP_CONTROL_FE) { 2227 /* 2228 * Chip Fatal Error. 2229 */ 2230 var = 2231 ql_read_page0_reg_l(qdev, 2232 &port_regs->PortFatalErrStatus); 2233 netdev_warn(ndev, 2234 "Resetting chip. PortFatalErrStatus register = 0x%x\n", 2235 var); 2236 set_bit(QL_RESET_START, &qdev->flags) ; 2237 } else { 2238 /* 2239 * Soft Reset Requested. 2240 */ 2241 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; 2242 netdev_err(ndev, 2243 "Another function issued a reset to the chip. ISR value = %x\n", 2244 value); 2245 } 2246 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2247 spin_unlock(&qdev->adapter_lock); 2248 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2249 ql_disable_interrupts(qdev); 2250 if (likely(napi_schedule_prep(&qdev->napi))) 2251 __napi_schedule(&qdev->napi); 2252 } else 2253 return IRQ_NONE; 2254 2255 return IRQ_RETVAL(handled); 2256 } 2257 2258 /* 2259 * Get the total number of segments needed for the given number of fragments. 2260 * This is necessary because outbound address lists (OAL) will be used when 2261 * more than two frags are given. Each address list has 5 addr/len pairs. 2262 * The 5th pair in each OAL is used to point to the next OAL if more frags 2263 * are coming. That is why the frags:segment count ratio is not linear. 2264 */ 2265 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2266 { 2267 if (qdev->device_id == QL3022_DEVICE_ID) 2268 return 1; 2269 2270 if (frags <= 2) 2271 return frags + 1; 2272 else if (frags <= 6) 2273 return frags + 2; 2274 else if (frags <= 10) 2275 return frags + 3; 2276 else if (frags <= 14) 2277 return frags + 4; 2278 else if (frags <= 18) 2279 return frags + 5; 2280 return -1; 2281 } 2282 2283 static void ql_hw_csum_setup(const struct sk_buff *skb, 2284 struct ob_mac_iocb_req *mac_iocb_ptr) 2285 { 2286 const struct iphdr *ip = ip_hdr(skb); 2287 2288 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2289 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2290 2291 if (ip->protocol == IPPROTO_TCP) { 2292 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2293 OB_3032MAC_IOCB_REQ_IC; 2294 } else { 2295 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2296 OB_3032MAC_IOCB_REQ_IC; 2297 } 2298 2299 } 2300 2301 /* 2302 * Map the buffers for this transmit. 2303 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2304 */ 2305 static int ql_send_map(struct ql3_adapter *qdev, 2306 struct ob_mac_iocb_req *mac_iocb_ptr, 2307 struct ql_tx_buf_cb *tx_cb, 2308 struct sk_buff *skb) 2309 { 2310 struct oal *oal; 2311 struct oal_entry *oal_entry; 2312 int len = skb_headlen(skb); 2313 dma_addr_t map; 2314 int err; 2315 int completed_segs, i; 2316 int seg_cnt, seg = 0; 2317 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2318 2319 seg_cnt = tx_cb->seg_count; 2320 /* 2321 * Map the skb buffer first. 2322 */ 2323 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2324 2325 err = pci_dma_mapping_error(qdev->pdev, map); 2326 if (err) { 2327 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2328 err); 2329 2330 return NETDEV_TX_BUSY; 2331 } 2332 2333 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2334 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2335 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2336 oal_entry->len = cpu_to_le32(len); 2337 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2338 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2339 seg++; 2340 2341 if (seg_cnt == 1) { 2342 /* Terminate the last segment. */ 2343 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2344 return NETDEV_TX_OK; 2345 } 2346 oal = tx_cb->oal; 2347 for (completed_segs = 0; 2348 completed_segs < frag_cnt; 2349 completed_segs++, seg++) { 2350 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2351 oal_entry++; 2352 /* 2353 * Check for continuation requirements. 2354 * It's strange but necessary. 2355 * Continuation entry points to outbound address list. 2356 */ 2357 if ((seg == 2 && seg_cnt > 3) || 2358 (seg == 7 && seg_cnt > 8) || 2359 (seg == 12 && seg_cnt > 13) || 2360 (seg == 17 && seg_cnt > 18)) { 2361 map = pci_map_single(qdev->pdev, oal, 2362 sizeof(struct oal), 2363 PCI_DMA_TODEVICE); 2364 2365 err = pci_dma_mapping_error(qdev->pdev, map); 2366 if (err) { 2367 netdev_err(qdev->ndev, 2368 "PCI mapping outbound address list with error: %d\n", 2369 err); 2370 goto map_error; 2371 } 2372 2373 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2374 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2375 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2376 OAL_CONT_ENTRY); 2377 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2378 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2379 sizeof(struct oal)); 2380 oal_entry = (struct oal_entry *)oal; 2381 oal++; 2382 seg++; 2383 } 2384 2385 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2386 DMA_TO_DEVICE); 2387 2388 err = dma_mapping_error(&qdev->pdev->dev, map); 2389 if (err) { 2390 netdev_err(qdev->ndev, 2391 "PCI mapping frags failed with error: %d\n", 2392 err); 2393 goto map_error; 2394 } 2395 2396 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2397 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2398 oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2399 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2400 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2401 } 2402 /* Terminate the last segment. */ 2403 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2404 return NETDEV_TX_OK; 2405 2406 map_error: 2407 /* A PCI mapping failed and now we will need to back out 2408 * We need to traverse through the oal's and associated pages which 2409 * have been mapped and now we must unmap them to clean up properly 2410 */ 2411 2412 seg = 1; 2413 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2414 oal = tx_cb->oal; 2415 for (i = 0; i < completed_segs; i++, seg++) { 2416 oal_entry++; 2417 2418 /* 2419 * Check for continuation requirements. 2420 * It's strange but necessary. 2421 */ 2422 2423 if ((seg == 2 && seg_cnt > 3) || 2424 (seg == 7 && seg_cnt > 8) || 2425 (seg == 12 && seg_cnt > 13) || 2426 (seg == 17 && seg_cnt > 18)) { 2427 pci_unmap_single(qdev->pdev, 2428 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2429 dma_unmap_len(&tx_cb->map[seg], maplen), 2430 PCI_DMA_TODEVICE); 2431 oal++; 2432 seg++; 2433 } 2434 2435 pci_unmap_page(qdev->pdev, 2436 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2437 dma_unmap_len(&tx_cb->map[seg], maplen), 2438 PCI_DMA_TODEVICE); 2439 } 2440 2441 pci_unmap_single(qdev->pdev, 2442 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2443 dma_unmap_addr(&tx_cb->map[0], maplen), 2444 PCI_DMA_TODEVICE); 2445 2446 return NETDEV_TX_BUSY; 2447 2448 } 2449 2450 /* 2451 * The difference between 3022 and 3032 sends: 2452 * 3022 only supports a simple single segment transmission. 2453 * 3032 supports checksumming and scatter/gather lists (fragments). 2454 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2455 * in the IOCB plus a chain of outbound address lists (OAL) that 2456 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2457 * will be used to point to an OAL when more ALP entries are required. 2458 * The IOCB is always the top of the chain followed by one or more 2459 * OALs (when necessary). 2460 */ 2461 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2462 struct net_device *ndev) 2463 { 2464 struct ql3_adapter *qdev = netdev_priv(ndev); 2465 struct ql3xxx_port_registers __iomem *port_regs = 2466 qdev->mem_map_registers; 2467 struct ql_tx_buf_cb *tx_cb; 2468 u32 tot_len = skb->len; 2469 struct ob_mac_iocb_req *mac_iocb_ptr; 2470 2471 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2472 return NETDEV_TX_BUSY; 2473 2474 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2475 tx_cb->seg_count = ql_get_seg_count(qdev, 2476 skb_shinfo(skb)->nr_frags); 2477 if (tx_cb->seg_count == -1) { 2478 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2479 return NETDEV_TX_OK; 2480 } 2481 2482 mac_iocb_ptr = tx_cb->queue_entry; 2483 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2484 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2485 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2486 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2487 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2488 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2489 tx_cb->skb = skb; 2490 if (qdev->device_id == QL3032_DEVICE_ID && 2491 skb->ip_summed == CHECKSUM_PARTIAL) 2492 ql_hw_csum_setup(skb, mac_iocb_ptr); 2493 2494 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2495 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2496 return NETDEV_TX_BUSY; 2497 } 2498 2499 wmb(); 2500 qdev->req_producer_index++; 2501 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2502 qdev->req_producer_index = 0; 2503 wmb(); 2504 ql_write_common_reg_l(qdev, 2505 &port_regs->CommonRegs.reqQProducerIndex, 2506 qdev->req_producer_index); 2507 2508 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2509 "tx queued, slot %d, len %d\n", 2510 qdev->req_producer_index, skb->len); 2511 2512 atomic_dec(&qdev->tx_count); 2513 return NETDEV_TX_OK; 2514 } 2515 2516 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2517 { 2518 qdev->req_q_size = 2519 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2520 2521 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2522 2523 /* The barrier is required to ensure request and response queue 2524 * addr writes to the registers. 2525 */ 2526 wmb(); 2527 2528 qdev->req_q_virt_addr = 2529 pci_alloc_consistent(qdev->pdev, 2530 (size_t) qdev->req_q_size, 2531 &qdev->req_q_phy_addr); 2532 2533 if ((qdev->req_q_virt_addr == NULL) || 2534 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2535 netdev_err(qdev->ndev, "reqQ failed\n"); 2536 return -ENOMEM; 2537 } 2538 2539 qdev->rsp_q_virt_addr = 2540 pci_alloc_consistent(qdev->pdev, 2541 (size_t) qdev->rsp_q_size, 2542 &qdev->rsp_q_phy_addr); 2543 2544 if ((qdev->rsp_q_virt_addr == NULL) || 2545 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2546 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2547 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2548 qdev->req_q_virt_addr, 2549 qdev->req_q_phy_addr); 2550 return -ENOMEM; 2551 } 2552 2553 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2554 2555 return 0; 2556 } 2557 2558 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2559 { 2560 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2561 netdev_info(qdev->ndev, "Already done\n"); 2562 return; 2563 } 2564 2565 pci_free_consistent(qdev->pdev, 2566 qdev->req_q_size, 2567 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2568 2569 qdev->req_q_virt_addr = NULL; 2570 2571 pci_free_consistent(qdev->pdev, 2572 qdev->rsp_q_size, 2573 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2574 2575 qdev->rsp_q_virt_addr = NULL; 2576 2577 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2578 } 2579 2580 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2581 { 2582 /* Create Large Buffer Queue */ 2583 qdev->lrg_buf_q_size = 2584 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2585 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2586 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2587 else 2588 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2589 2590 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, 2591 sizeof(struct ql_rcv_buf_cb), 2592 GFP_KERNEL); 2593 if (qdev->lrg_buf == NULL) 2594 return -ENOMEM; 2595 2596 qdev->lrg_buf_q_alloc_virt_addr = 2597 pci_alloc_consistent(qdev->pdev, 2598 qdev->lrg_buf_q_alloc_size, 2599 &qdev->lrg_buf_q_alloc_phy_addr); 2600 2601 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2602 netdev_err(qdev->ndev, "lBufQ failed\n"); 2603 return -ENOMEM; 2604 } 2605 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2606 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2607 2608 /* Create Small Buffer Queue */ 2609 qdev->small_buf_q_size = 2610 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2611 if (qdev->small_buf_q_size < PAGE_SIZE) 2612 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2613 else 2614 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2615 2616 qdev->small_buf_q_alloc_virt_addr = 2617 pci_alloc_consistent(qdev->pdev, 2618 qdev->small_buf_q_alloc_size, 2619 &qdev->small_buf_q_alloc_phy_addr); 2620 2621 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2622 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); 2623 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2624 qdev->lrg_buf_q_alloc_virt_addr, 2625 qdev->lrg_buf_q_alloc_phy_addr); 2626 return -ENOMEM; 2627 } 2628 2629 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2630 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2631 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2632 return 0; 2633 } 2634 2635 static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2636 { 2637 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { 2638 netdev_info(qdev->ndev, "Already done\n"); 2639 return; 2640 } 2641 kfree(qdev->lrg_buf); 2642 pci_free_consistent(qdev->pdev, 2643 qdev->lrg_buf_q_alloc_size, 2644 qdev->lrg_buf_q_alloc_virt_addr, 2645 qdev->lrg_buf_q_alloc_phy_addr); 2646 2647 qdev->lrg_buf_q_virt_addr = NULL; 2648 2649 pci_free_consistent(qdev->pdev, 2650 qdev->small_buf_q_alloc_size, 2651 qdev->small_buf_q_alloc_virt_addr, 2652 qdev->small_buf_q_alloc_phy_addr); 2653 2654 qdev->small_buf_q_virt_addr = NULL; 2655 2656 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2657 } 2658 2659 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2660 { 2661 int i; 2662 struct bufq_addr_element *small_buf_q_entry; 2663 2664 /* Currently we allocate on one of memory and use it for smallbuffers */ 2665 qdev->small_buf_total_size = 2666 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2667 QL_SMALL_BUFFER_SIZE); 2668 2669 qdev->small_buf_virt_addr = 2670 pci_alloc_consistent(qdev->pdev, 2671 qdev->small_buf_total_size, 2672 &qdev->small_buf_phy_addr); 2673 2674 if (qdev->small_buf_virt_addr == NULL) { 2675 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); 2676 return -ENOMEM; 2677 } 2678 2679 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); 2680 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2681 2682 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2683 2684 /* Initialize the small buffer queue. */ 2685 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2686 small_buf_q_entry->addr_high = 2687 cpu_to_le32(qdev->small_buf_phy_addr_high); 2688 small_buf_q_entry->addr_low = 2689 cpu_to_le32(qdev->small_buf_phy_addr_low + 2690 (i * QL_SMALL_BUFFER_SIZE)); 2691 small_buf_q_entry++; 2692 } 2693 qdev->small_buf_index = 0; 2694 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2695 return 0; 2696 } 2697 2698 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2699 { 2700 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2701 netdev_info(qdev->ndev, "Already done\n"); 2702 return; 2703 } 2704 if (qdev->small_buf_virt_addr != NULL) { 2705 pci_free_consistent(qdev->pdev, 2706 qdev->small_buf_total_size, 2707 qdev->small_buf_virt_addr, 2708 qdev->small_buf_phy_addr); 2709 2710 qdev->small_buf_virt_addr = NULL; 2711 } 2712 } 2713 2714 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2715 { 2716 int i = 0; 2717 struct ql_rcv_buf_cb *lrg_buf_cb; 2718 2719 for (i = 0; i < qdev->num_large_buffers; i++) { 2720 lrg_buf_cb = &qdev->lrg_buf[i]; 2721 if (lrg_buf_cb->skb) { 2722 dev_kfree_skb(lrg_buf_cb->skb); 2723 pci_unmap_single(qdev->pdev, 2724 dma_unmap_addr(lrg_buf_cb, mapaddr), 2725 dma_unmap_len(lrg_buf_cb, maplen), 2726 PCI_DMA_FROMDEVICE); 2727 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2728 } else { 2729 break; 2730 } 2731 } 2732 } 2733 2734 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2735 { 2736 int i; 2737 struct ql_rcv_buf_cb *lrg_buf_cb; 2738 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2739 2740 for (i = 0; i < qdev->num_large_buffers; i++) { 2741 lrg_buf_cb = &qdev->lrg_buf[i]; 2742 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2743 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2744 buf_addr_ele++; 2745 } 2746 qdev->lrg_buf_index = 0; 2747 qdev->lrg_buf_skb_check = 0; 2748 } 2749 2750 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2751 { 2752 int i; 2753 struct ql_rcv_buf_cb *lrg_buf_cb; 2754 struct sk_buff *skb; 2755 dma_addr_t map; 2756 int err; 2757 2758 for (i = 0; i < qdev->num_large_buffers; i++) { 2759 skb = netdev_alloc_skb(qdev->ndev, 2760 qdev->lrg_buffer_len); 2761 if (unlikely(!skb)) { 2762 /* Better luck next round */ 2763 netdev_err(qdev->ndev, 2764 "large buff alloc failed for %d bytes at index %d\n", 2765 qdev->lrg_buffer_len * 2, i); 2766 ql_free_large_buffers(qdev); 2767 return -ENOMEM; 2768 } else { 2769 2770 lrg_buf_cb = &qdev->lrg_buf[i]; 2771 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2772 lrg_buf_cb->index = i; 2773 lrg_buf_cb->skb = skb; 2774 /* 2775 * We save some space to copy the ethhdr from first 2776 * buffer 2777 */ 2778 skb_reserve(skb, QL_HEADER_SPACE); 2779 map = pci_map_single(qdev->pdev, 2780 skb->data, 2781 qdev->lrg_buffer_len - 2782 QL_HEADER_SPACE, 2783 PCI_DMA_FROMDEVICE); 2784 2785 err = pci_dma_mapping_error(qdev->pdev, map); 2786 if (err) { 2787 netdev_err(qdev->ndev, 2788 "PCI mapping failed with error: %d\n", 2789 err); 2790 ql_free_large_buffers(qdev); 2791 return -ENOMEM; 2792 } 2793 2794 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2795 dma_unmap_len_set(lrg_buf_cb, maplen, 2796 qdev->lrg_buffer_len - 2797 QL_HEADER_SPACE); 2798 lrg_buf_cb->buf_phy_addr_low = 2799 cpu_to_le32(LS_64BITS(map)); 2800 lrg_buf_cb->buf_phy_addr_high = 2801 cpu_to_le32(MS_64BITS(map)); 2802 } 2803 } 2804 return 0; 2805 } 2806 2807 static void ql_free_send_free_list(struct ql3_adapter *qdev) 2808 { 2809 struct ql_tx_buf_cb *tx_cb; 2810 int i; 2811 2812 tx_cb = &qdev->tx_buf[0]; 2813 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2814 kfree(tx_cb->oal); 2815 tx_cb->oal = NULL; 2816 tx_cb++; 2817 } 2818 } 2819 2820 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2821 { 2822 struct ql_tx_buf_cb *tx_cb; 2823 int i; 2824 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2825 2826 /* Create free list of transmit buffers */ 2827 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2828 2829 tx_cb = &qdev->tx_buf[i]; 2830 tx_cb->skb = NULL; 2831 tx_cb->queue_entry = req_q_curr; 2832 req_q_curr++; 2833 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2834 if (tx_cb->oal == NULL) 2835 return -ENOMEM; 2836 } 2837 return 0; 2838 } 2839 2840 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2841 { 2842 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2843 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2844 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2845 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2846 /* 2847 * Bigger buffers, so less of them. 2848 */ 2849 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2850 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2851 } else { 2852 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", 2853 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2854 return -ENOMEM; 2855 } 2856 qdev->num_large_buffers = 2857 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2858 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2859 qdev->max_frame_size = 2860 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2861 2862 /* 2863 * First allocate a page of shared memory and use it for shadow 2864 * locations of Network Request Queue Consumer Address Register and 2865 * Network Completion Queue Producer Index Register 2866 */ 2867 qdev->shadow_reg_virt_addr = 2868 pci_alloc_consistent(qdev->pdev, 2869 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2870 2871 if (qdev->shadow_reg_virt_addr != NULL) { 2872 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2873 qdev->req_consumer_index_phy_addr_high = 2874 MS_64BITS(qdev->shadow_reg_phy_addr); 2875 qdev->req_consumer_index_phy_addr_low = 2876 LS_64BITS(qdev->shadow_reg_phy_addr); 2877 2878 qdev->prsp_producer_index = 2879 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2880 qdev->rsp_producer_index_phy_addr_high = 2881 qdev->req_consumer_index_phy_addr_high; 2882 qdev->rsp_producer_index_phy_addr_low = 2883 qdev->req_consumer_index_phy_addr_low + 8; 2884 } else { 2885 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2886 return -ENOMEM; 2887 } 2888 2889 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2890 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2891 goto err_req_rsp; 2892 } 2893 2894 if (ql_alloc_buffer_queues(qdev) != 0) { 2895 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2896 goto err_buffer_queues; 2897 } 2898 2899 if (ql_alloc_small_buffers(qdev) != 0) { 2900 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2901 goto err_small_buffers; 2902 } 2903 2904 if (ql_alloc_large_buffers(qdev) != 0) { 2905 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2906 goto err_small_buffers; 2907 } 2908 2909 /* Initialize the large buffer queue. */ 2910 ql_init_large_buffers(qdev); 2911 if (ql_create_send_free_list(qdev)) 2912 goto err_free_list; 2913 2914 qdev->rsp_current = qdev->rsp_q_virt_addr; 2915 2916 return 0; 2917 err_free_list: 2918 ql_free_send_free_list(qdev); 2919 err_small_buffers: 2920 ql_free_buffer_queues(qdev); 2921 err_buffer_queues: 2922 ql_free_net_req_rsp_queues(qdev); 2923 err_req_rsp: 2924 pci_free_consistent(qdev->pdev, 2925 PAGE_SIZE, 2926 qdev->shadow_reg_virt_addr, 2927 qdev->shadow_reg_phy_addr); 2928 2929 return -ENOMEM; 2930 } 2931 2932 static void ql_free_mem_resources(struct ql3_adapter *qdev) 2933 { 2934 ql_free_send_free_list(qdev); 2935 ql_free_large_buffers(qdev); 2936 ql_free_small_buffers(qdev); 2937 ql_free_buffer_queues(qdev); 2938 ql_free_net_req_rsp_queues(qdev); 2939 if (qdev->shadow_reg_virt_addr != NULL) { 2940 pci_free_consistent(qdev->pdev, 2941 PAGE_SIZE, 2942 qdev->shadow_reg_virt_addr, 2943 qdev->shadow_reg_phy_addr); 2944 qdev->shadow_reg_virt_addr = NULL; 2945 } 2946 } 2947 2948 static int ql_init_misc_registers(struct ql3_adapter *qdev) 2949 { 2950 struct ql3xxx_local_ram_registers __iomem *local_ram = 2951 (void __iomem *)qdev->mem_map_registers; 2952 2953 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2954 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2955 2) << 4)) 2956 return -1; 2957 2958 ql_write_page2_reg(qdev, 2959 &local_ram->bufletSize, qdev->nvram_data.bufletSize); 2960 2961 ql_write_page2_reg(qdev, 2962 &local_ram->maxBufletCount, 2963 qdev->nvram_data.bufletCount); 2964 2965 ql_write_page2_reg(qdev, 2966 &local_ram->freeBufletThresholdLow, 2967 (qdev->nvram_data.tcpWindowThreshold25 << 16) | 2968 (qdev->nvram_data.tcpWindowThreshold0)); 2969 2970 ql_write_page2_reg(qdev, 2971 &local_ram->freeBufletThresholdHigh, 2972 qdev->nvram_data.tcpWindowThreshold50); 2973 2974 ql_write_page2_reg(qdev, 2975 &local_ram->ipHashTableBase, 2976 (qdev->nvram_data.ipHashTableBaseHi << 16) | 2977 qdev->nvram_data.ipHashTableBaseLo); 2978 ql_write_page2_reg(qdev, 2979 &local_ram->ipHashTableCount, 2980 qdev->nvram_data.ipHashTableSize); 2981 ql_write_page2_reg(qdev, 2982 &local_ram->tcpHashTableBase, 2983 (qdev->nvram_data.tcpHashTableBaseHi << 16) | 2984 qdev->nvram_data.tcpHashTableBaseLo); 2985 ql_write_page2_reg(qdev, 2986 &local_ram->tcpHashTableCount, 2987 qdev->nvram_data.tcpHashTableSize); 2988 ql_write_page2_reg(qdev, 2989 &local_ram->ncbBase, 2990 (qdev->nvram_data.ncbTableBaseHi << 16) | 2991 qdev->nvram_data.ncbTableBaseLo); 2992 ql_write_page2_reg(qdev, 2993 &local_ram->maxNcbCount, 2994 qdev->nvram_data.ncbTableSize); 2995 ql_write_page2_reg(qdev, 2996 &local_ram->drbBase, 2997 (qdev->nvram_data.drbTableBaseHi << 16) | 2998 qdev->nvram_data.drbTableBaseLo); 2999 ql_write_page2_reg(qdev, 3000 &local_ram->maxDrbCount, 3001 qdev->nvram_data.drbTableSize); 3002 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); 3003 return 0; 3004 } 3005 3006 static int ql_adapter_initialize(struct ql3_adapter *qdev) 3007 { 3008 u32 value; 3009 struct ql3xxx_port_registers __iomem *port_regs = 3010 qdev->mem_map_registers; 3011 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3012 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3013 (void __iomem *)port_regs; 3014 u32 delay = 10; 3015 int status = 0; 3016 3017 if (ql_mii_setup(qdev)) 3018 return -1; 3019 3020 /* Bring out PHY out of reset */ 3021 ql_write_common_reg(qdev, spir, 3022 (ISP_SERIAL_PORT_IF_WE | 3023 (ISP_SERIAL_PORT_IF_WE << 16))); 3024 /* Give the PHY time to come out of reset. */ 3025 mdelay(100); 3026 qdev->port_link_state = LS_DOWN; 3027 netif_carrier_off(qdev->ndev); 3028 3029 /* V2 chip fix for ARS-39168. */ 3030 ql_write_common_reg(qdev, spir, 3031 (ISP_SERIAL_PORT_IF_SDE | 3032 (ISP_SERIAL_PORT_IF_SDE << 16))); 3033 3034 /* Request Queue Registers */ 3035 *((u32 *)(qdev->preq_consumer_index)) = 0; 3036 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3037 qdev->req_producer_index = 0; 3038 3039 ql_write_page1_reg(qdev, 3040 &hmem_regs->reqConsumerIndexAddrHigh, 3041 qdev->req_consumer_index_phy_addr_high); 3042 ql_write_page1_reg(qdev, 3043 &hmem_regs->reqConsumerIndexAddrLow, 3044 qdev->req_consumer_index_phy_addr_low); 3045 3046 ql_write_page1_reg(qdev, 3047 &hmem_regs->reqBaseAddrHigh, 3048 MS_64BITS(qdev->req_q_phy_addr)); 3049 ql_write_page1_reg(qdev, 3050 &hmem_regs->reqBaseAddrLow, 3051 LS_64BITS(qdev->req_q_phy_addr)); 3052 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3053 3054 /* Response Queue Registers */ 3055 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3056 qdev->rsp_consumer_index = 0; 3057 qdev->rsp_current = qdev->rsp_q_virt_addr; 3058 3059 ql_write_page1_reg(qdev, 3060 &hmem_regs->rspProducerIndexAddrHigh, 3061 qdev->rsp_producer_index_phy_addr_high); 3062 3063 ql_write_page1_reg(qdev, 3064 &hmem_regs->rspProducerIndexAddrLow, 3065 qdev->rsp_producer_index_phy_addr_low); 3066 3067 ql_write_page1_reg(qdev, 3068 &hmem_regs->rspBaseAddrHigh, 3069 MS_64BITS(qdev->rsp_q_phy_addr)); 3070 3071 ql_write_page1_reg(qdev, 3072 &hmem_regs->rspBaseAddrLow, 3073 LS_64BITS(qdev->rsp_q_phy_addr)); 3074 3075 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3076 3077 /* Large Buffer Queue */ 3078 ql_write_page1_reg(qdev, 3079 &hmem_regs->rxLargeQBaseAddrHigh, 3080 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3081 3082 ql_write_page1_reg(qdev, 3083 &hmem_regs->rxLargeQBaseAddrLow, 3084 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3085 3086 ql_write_page1_reg(qdev, 3087 &hmem_regs->rxLargeQLength, 3088 qdev->num_lbufq_entries); 3089 3090 ql_write_page1_reg(qdev, 3091 &hmem_regs->rxLargeBufferLength, 3092 qdev->lrg_buffer_len); 3093 3094 /* Small Buffer Queue */ 3095 ql_write_page1_reg(qdev, 3096 &hmem_regs->rxSmallQBaseAddrHigh, 3097 MS_64BITS(qdev->small_buf_q_phy_addr)); 3098 3099 ql_write_page1_reg(qdev, 3100 &hmem_regs->rxSmallQBaseAddrLow, 3101 LS_64BITS(qdev->small_buf_q_phy_addr)); 3102 3103 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3104 ql_write_page1_reg(qdev, 3105 &hmem_regs->rxSmallBufferLength, 3106 QL_SMALL_BUFFER_SIZE); 3107 3108 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3109 qdev->small_buf_release_cnt = 8; 3110 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3111 qdev->lrg_buf_release_cnt = 8; 3112 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3113 qdev->small_buf_index = 0; 3114 qdev->lrg_buf_index = 0; 3115 qdev->lrg_buf_free_count = 0; 3116 qdev->lrg_buf_free_head = NULL; 3117 qdev->lrg_buf_free_tail = NULL; 3118 3119 ql_write_common_reg(qdev, 3120 &port_regs->CommonRegs. 3121 rxSmallQProducerIndex, 3122 qdev->small_buf_q_producer_index); 3123 ql_write_common_reg(qdev, 3124 &port_regs->CommonRegs. 3125 rxLargeQProducerIndex, 3126 qdev->lrg_buf_q_producer_index); 3127 3128 /* 3129 * Find out if the chip has already been initialized. If it has, then 3130 * we skip some of the initialization. 3131 */ 3132 clear_bit(QL_LINK_MASTER, &qdev->flags); 3133 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3134 if ((value & PORT_STATUS_IC) == 0) { 3135 3136 /* Chip has not been configured yet, so let it rip. */ 3137 if (ql_init_misc_registers(qdev)) { 3138 status = -1; 3139 goto out; 3140 } 3141 3142 value = qdev->nvram_data.tcpMaxWindowSize; 3143 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3144 3145 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3146 3147 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3148 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3149 * 2) << 13)) { 3150 status = -1; 3151 goto out; 3152 } 3153 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3154 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3155 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3156 16) | (INTERNAL_CHIP_SD | 3157 INTERNAL_CHIP_WE))); 3158 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3159 } 3160 3161 if (qdev->mac_index) 3162 ql_write_page0_reg(qdev, 3163 &port_regs->mac1MaxFrameLengthReg, 3164 qdev->max_frame_size); 3165 else 3166 ql_write_page0_reg(qdev, 3167 &port_regs->mac0MaxFrameLengthReg, 3168 qdev->max_frame_size); 3169 3170 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3171 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3172 2) << 7)) { 3173 status = -1; 3174 goto out; 3175 } 3176 3177 PHY_Setup(qdev); 3178 ql_init_scan_mode(qdev); 3179 ql_get_phy_owner(qdev); 3180 3181 /* Load the MAC Configuration */ 3182 3183 /* Program lower 32 bits of the MAC address */ 3184 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3185 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3186 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3187 ((qdev->ndev->dev_addr[2] << 24) 3188 | (qdev->ndev->dev_addr[3] << 16) 3189 | (qdev->ndev->dev_addr[4] << 8) 3190 | qdev->ndev->dev_addr[5])); 3191 3192 /* Program top 16 bits of the MAC address */ 3193 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3194 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3195 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3196 ((qdev->ndev->dev_addr[0] << 8) 3197 | qdev->ndev->dev_addr[1])); 3198 3199 /* Enable Primary MAC */ 3200 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3201 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3202 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3203 3204 /* Clear Primary and Secondary IP addresses */ 3205 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3206 ((IP_ADDR_INDEX_REG_MASK << 16) | 3207 (qdev->mac_index << 2))); 3208 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3209 3210 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3211 ((IP_ADDR_INDEX_REG_MASK << 16) | 3212 ((qdev->mac_index << 2) + 1))); 3213 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3214 3215 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3216 3217 /* Indicate Configuration Complete */ 3218 ql_write_page0_reg(qdev, 3219 &port_regs->portControl, 3220 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3221 3222 do { 3223 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3224 if (value & PORT_STATUS_IC) 3225 break; 3226 spin_unlock_irq(&qdev->hw_lock); 3227 msleep(500); 3228 spin_lock_irq(&qdev->hw_lock); 3229 } while (--delay); 3230 3231 if (delay == 0) { 3232 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3233 status = -1; 3234 goto out; 3235 } 3236 3237 /* Enable Ethernet Function */ 3238 if (qdev->device_id == QL3032_DEVICE_ID) { 3239 value = 3240 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 3241 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | 3242 QL3032_PORT_CONTROL_ET); 3243 ql_write_page0_reg(qdev, &port_regs->functionControl, 3244 ((value << 16) | value)); 3245 } else { 3246 value = 3247 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 3248 PORT_CONTROL_HH); 3249 ql_write_page0_reg(qdev, &port_regs->portControl, 3250 ((value << 16) | value)); 3251 } 3252 3253 3254 out: 3255 return status; 3256 } 3257 3258 /* 3259 * Caller holds hw_lock. 3260 */ 3261 static int ql_adapter_reset(struct ql3_adapter *qdev) 3262 { 3263 struct ql3xxx_port_registers __iomem *port_regs = 3264 qdev->mem_map_registers; 3265 int status = 0; 3266 u16 value; 3267 int max_wait_time; 3268 3269 set_bit(QL_RESET_ACTIVE, &qdev->flags); 3270 clear_bit(QL_RESET_DONE, &qdev->flags); 3271 3272 /* 3273 * Issue soft reset to chip. 3274 */ 3275 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); 3276 ql_write_common_reg(qdev, 3277 &port_regs->CommonRegs.ispControlStatus, 3278 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3279 3280 /* Wait 3 seconds for reset to complete. */ 3281 netdev_printk(KERN_DEBUG, qdev->ndev, 3282 "Wait 10 milliseconds for reset to complete\n"); 3283 3284 /* Wait until the firmware tells us the Soft Reset is done */ 3285 max_wait_time = 5; 3286 do { 3287 value = 3288 ql_read_common_reg(qdev, 3289 &port_regs->CommonRegs.ispControlStatus); 3290 if ((value & ISP_CONTROL_SR) == 0) 3291 break; 3292 3293 ssleep(1); 3294 } while ((--max_wait_time)); 3295 3296 /* 3297 * Also, make sure that the Network Reset Interrupt bit has been 3298 * cleared after the soft reset has taken place. 3299 */ 3300 value = 3301 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3302 if (value & ISP_CONTROL_RI) { 3303 netdev_printk(KERN_DEBUG, qdev->ndev, 3304 "clearing RI after reset\n"); 3305 ql_write_common_reg(qdev, 3306 &port_regs->CommonRegs. 3307 ispControlStatus, 3308 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3309 } 3310 3311 if (max_wait_time == 0) { 3312 /* Issue Force Soft Reset */ 3313 ql_write_common_reg(qdev, 3314 &port_regs->CommonRegs. 3315 ispControlStatus, 3316 ((ISP_CONTROL_FSR << 16) | 3317 ISP_CONTROL_FSR)); 3318 /* 3319 * Wait until the firmware tells us the Force Soft Reset is 3320 * done 3321 */ 3322 max_wait_time = 5; 3323 do { 3324 value = ql_read_common_reg(qdev, 3325 &port_regs->CommonRegs. 3326 ispControlStatus); 3327 if ((value & ISP_CONTROL_FSR) == 0) 3328 break; 3329 ssleep(1); 3330 } while ((--max_wait_time)); 3331 } 3332 if (max_wait_time == 0) 3333 status = 1; 3334 3335 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3336 set_bit(QL_RESET_DONE, &qdev->flags); 3337 return status; 3338 } 3339 3340 static void ql_set_mac_info(struct ql3_adapter *qdev) 3341 { 3342 struct ql3xxx_port_registers __iomem *port_regs = 3343 qdev->mem_map_registers; 3344 u32 value, port_status; 3345 u8 func_number; 3346 3347 /* Get the function number */ 3348 value = 3349 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3350 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3351 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3352 switch (value & ISP_CONTROL_FN_MASK) { 3353 case ISP_CONTROL_FN0_NET: 3354 qdev->mac_index = 0; 3355 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3356 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3357 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3358 if (port_status & PORT_STATUS_SM0) 3359 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3360 else 3361 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3362 break; 3363 3364 case ISP_CONTROL_FN1_NET: 3365 qdev->mac_index = 1; 3366 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3367 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3368 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3369 if (port_status & PORT_STATUS_SM1) 3370 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3371 else 3372 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3373 break; 3374 3375 case ISP_CONTROL_FN0_SCSI: 3376 case ISP_CONTROL_FN1_SCSI: 3377 default: 3378 netdev_printk(KERN_DEBUG, qdev->ndev, 3379 "Invalid function number, ispControlStatus = 0x%x\n", 3380 value); 3381 break; 3382 } 3383 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3384 } 3385 3386 static void ql_display_dev_info(struct net_device *ndev) 3387 { 3388 struct ql3_adapter *qdev = netdev_priv(ndev); 3389 struct pci_dev *pdev = qdev->pdev; 3390 3391 netdev_info(ndev, 3392 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3393 DRV_NAME, qdev->index, qdev->chip_rev_id, 3394 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3395 qdev->pci_slot); 3396 netdev_info(ndev, "%s Interface\n", 3397 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3398 3399 /* 3400 * Print PCI bus width/type. 3401 */ 3402 netdev_info(ndev, "Bus interface is %s %s\n", 3403 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3404 ((qdev->pci_x) ? "PCI-X" : "PCI")); 3405 3406 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3407 qdev->mem_map_registers); 3408 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3409 3410 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3411 } 3412 3413 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3414 { 3415 struct net_device *ndev = qdev->ndev; 3416 int retval = 0; 3417 3418 netif_stop_queue(ndev); 3419 netif_carrier_off(ndev); 3420 3421 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3422 clear_bit(QL_LINK_MASTER, &qdev->flags); 3423 3424 ql_disable_interrupts(qdev); 3425 3426 free_irq(qdev->pdev->irq, ndev); 3427 3428 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3429 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3430 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3431 pci_disable_msi(qdev->pdev); 3432 } 3433 3434 del_timer_sync(&qdev->adapter_timer); 3435 3436 napi_disable(&qdev->napi); 3437 3438 if (do_reset) { 3439 int soft_reset; 3440 unsigned long hw_flags; 3441 3442 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3443 if (ql_wait_for_drvr_lock(qdev)) { 3444 soft_reset = ql_adapter_reset(qdev); 3445 if (soft_reset) { 3446 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3447 qdev->index); 3448 } 3449 netdev_err(ndev, 3450 "Releasing driver lock via chip reset\n"); 3451 } else { 3452 netdev_err(ndev, 3453 "Could not acquire driver lock to do reset!\n"); 3454 retval = -1; 3455 } 3456 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3457 } 3458 ql_free_mem_resources(qdev); 3459 return retval; 3460 } 3461 3462 static int ql_adapter_up(struct ql3_adapter *qdev) 3463 { 3464 struct net_device *ndev = qdev->ndev; 3465 int err; 3466 unsigned long irq_flags = IRQF_SHARED; 3467 unsigned long hw_flags; 3468 3469 if (ql_alloc_mem_resources(qdev)) { 3470 netdev_err(ndev, "Unable to allocate buffers\n"); 3471 return -ENOMEM; 3472 } 3473 3474 if (qdev->msi) { 3475 if (pci_enable_msi(qdev->pdev)) { 3476 netdev_err(ndev, 3477 "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n"); 3478 qdev->msi = 0; 3479 } else { 3480 netdev_info(ndev, "MSI Enabled...\n"); 3481 set_bit(QL_MSI_ENABLED, &qdev->flags); 3482 irq_flags &= ~IRQF_SHARED; 3483 } 3484 } 3485 3486 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3487 irq_flags, ndev->name, ndev); 3488 if (err) { 3489 netdev_err(ndev, 3490 "Failed to reserve interrupt %d - already in use\n", 3491 qdev->pdev->irq); 3492 goto err_irq; 3493 } 3494 3495 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3496 3497 err = ql_wait_for_drvr_lock(qdev); 3498 if (err) { 3499 err = ql_adapter_initialize(qdev); 3500 if (err) { 3501 netdev_err(ndev, "Unable to initialize adapter\n"); 3502 goto err_init; 3503 } 3504 netdev_err(ndev, "Releasing driver lock\n"); 3505 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3506 } else { 3507 netdev_err(ndev, "Could not acquire driver lock\n"); 3508 goto err_lock; 3509 } 3510 3511 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3512 3513 set_bit(QL_ADAPTER_UP, &qdev->flags); 3514 3515 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3516 3517 napi_enable(&qdev->napi); 3518 ql_enable_interrupts(qdev); 3519 return 0; 3520 3521 err_init: 3522 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3523 err_lock: 3524 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3525 free_irq(qdev->pdev->irq, ndev); 3526 err_irq: 3527 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3528 netdev_info(ndev, "calling pci_disable_msi()\n"); 3529 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3530 pci_disable_msi(qdev->pdev); 3531 } 3532 return err; 3533 } 3534 3535 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3536 { 3537 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3538 netdev_err(qdev->ndev, 3539 "Driver up/down cycle failed, closing device\n"); 3540 rtnl_lock(); 3541 dev_close(qdev->ndev); 3542 rtnl_unlock(); 3543 return -1; 3544 } 3545 return 0; 3546 } 3547 3548 static int ql3xxx_close(struct net_device *ndev) 3549 { 3550 struct ql3_adapter *qdev = netdev_priv(ndev); 3551 3552 /* 3553 * Wait for device to recover from a reset. 3554 * (Rarely happens, but possible.) 3555 */ 3556 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3557 msleep(50); 3558 3559 ql_adapter_down(qdev, QL_DO_RESET); 3560 return 0; 3561 } 3562 3563 static int ql3xxx_open(struct net_device *ndev) 3564 { 3565 struct ql3_adapter *qdev = netdev_priv(ndev); 3566 return ql_adapter_up(qdev); 3567 } 3568 3569 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3570 { 3571 struct ql3_adapter *qdev = netdev_priv(ndev); 3572 struct ql3xxx_port_registers __iomem *port_regs = 3573 qdev->mem_map_registers; 3574 struct sockaddr *addr = p; 3575 unsigned long hw_flags; 3576 3577 if (netif_running(ndev)) 3578 return -EBUSY; 3579 3580 if (!is_valid_ether_addr(addr->sa_data)) 3581 return -EADDRNOTAVAIL; 3582 3583 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3584 3585 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3586 /* Program lower 32 bits of the MAC address */ 3587 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3588 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3589 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3590 ((ndev->dev_addr[2] << 24) | (ndev-> 3591 dev_addr[3] << 16) | 3592 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3593 3594 /* Program top 16 bits of the MAC address */ 3595 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3596 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3597 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3598 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3599 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3600 3601 return 0; 3602 } 3603 3604 static void ql3xxx_tx_timeout(struct net_device *ndev) 3605 { 3606 struct ql3_adapter *qdev = netdev_priv(ndev); 3607 3608 netdev_err(ndev, "Resetting...\n"); 3609 /* 3610 * Stop the queues, we've got a problem. 3611 */ 3612 netif_stop_queue(ndev); 3613 3614 /* 3615 * Wake up the worker to process this event. 3616 */ 3617 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3618 } 3619 3620 static void ql_reset_work(struct work_struct *work) 3621 { 3622 struct ql3_adapter *qdev = 3623 container_of(work, struct ql3_adapter, reset_work.work); 3624 struct net_device *ndev = qdev->ndev; 3625 u32 value; 3626 struct ql_tx_buf_cb *tx_cb; 3627 int max_wait_time, i; 3628 struct ql3xxx_port_registers __iomem *port_regs = 3629 qdev->mem_map_registers; 3630 unsigned long hw_flags; 3631 3632 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3633 clear_bit(QL_LINK_MASTER, &qdev->flags); 3634 3635 /* 3636 * Loop through the active list and return the skb. 3637 */ 3638 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3639 int j; 3640 tx_cb = &qdev->tx_buf[i]; 3641 if (tx_cb->skb) { 3642 netdev_printk(KERN_DEBUG, ndev, 3643 "Freeing lost SKB\n"); 3644 pci_unmap_single(qdev->pdev, 3645 dma_unmap_addr(&tx_cb->map[0], 3646 mapaddr), 3647 dma_unmap_len(&tx_cb->map[0], maplen), 3648 PCI_DMA_TODEVICE); 3649 for (j = 1; j < tx_cb->seg_count; j++) { 3650 pci_unmap_page(qdev->pdev, 3651 dma_unmap_addr(&tx_cb->map[j], 3652 mapaddr), 3653 dma_unmap_len(&tx_cb->map[j], 3654 maplen), 3655 PCI_DMA_TODEVICE); 3656 } 3657 dev_kfree_skb(tx_cb->skb); 3658 tx_cb->skb = NULL; 3659 } 3660 } 3661 3662 netdev_err(ndev, "Clearing NRI after reset\n"); 3663 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3664 ql_write_common_reg(qdev, 3665 &port_regs->CommonRegs. 3666 ispControlStatus, 3667 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3668 /* 3669 * Wait the for Soft Reset to Complete. 3670 */ 3671 max_wait_time = 10; 3672 do { 3673 value = ql_read_common_reg(qdev, 3674 &port_regs->CommonRegs. 3675 3676 ispControlStatus); 3677 if ((value & ISP_CONTROL_SR) == 0) { 3678 netdev_printk(KERN_DEBUG, ndev, 3679 "reset completed\n"); 3680 break; 3681 } 3682 3683 if (value & ISP_CONTROL_RI) { 3684 netdev_printk(KERN_DEBUG, ndev, 3685 "clearing NRI after reset\n"); 3686 ql_write_common_reg(qdev, 3687 &port_regs-> 3688 CommonRegs. 3689 ispControlStatus, 3690 ((ISP_CONTROL_RI << 3691 16) | ISP_CONTROL_RI)); 3692 } 3693 3694 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3695 ssleep(1); 3696 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3697 } while (--max_wait_time); 3698 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3699 3700 if (value & ISP_CONTROL_SR) { 3701 3702 /* 3703 * Set the reset flags and clear the board again. 3704 * Nothing else to do... 3705 */ 3706 netdev_err(ndev, 3707 "Timed out waiting for reset to complete\n"); 3708 netdev_err(ndev, "Do a reset\n"); 3709 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3710 clear_bit(QL_RESET_START, &qdev->flags); 3711 ql_cycle_adapter(qdev, QL_DO_RESET); 3712 return; 3713 } 3714 3715 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3716 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3717 clear_bit(QL_RESET_START, &qdev->flags); 3718 ql_cycle_adapter(qdev, QL_NO_RESET); 3719 } 3720 } 3721 3722 static void ql_tx_timeout_work(struct work_struct *work) 3723 { 3724 struct ql3_adapter *qdev = 3725 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3726 3727 ql_cycle_adapter(qdev, QL_DO_RESET); 3728 } 3729 3730 static void ql_get_board_info(struct ql3_adapter *qdev) 3731 { 3732 struct ql3xxx_port_registers __iomem *port_regs = 3733 qdev->mem_map_registers; 3734 u32 value; 3735 3736 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3737 3738 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3739 if (value & PORT_STATUS_64) 3740 qdev->pci_width = 64; 3741 else 3742 qdev->pci_width = 32; 3743 if (value & PORT_STATUS_X) 3744 qdev->pci_x = 1; 3745 else 3746 qdev->pci_x = 0; 3747 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3748 } 3749 3750 static void ql3xxx_timer(struct timer_list *t) 3751 { 3752 struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer); 3753 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3754 } 3755 3756 static const struct net_device_ops ql3xxx_netdev_ops = { 3757 .ndo_open = ql3xxx_open, 3758 .ndo_start_xmit = ql3xxx_send, 3759 .ndo_stop = ql3xxx_close, 3760 .ndo_validate_addr = eth_validate_addr, 3761 .ndo_set_mac_address = ql3xxx_set_mac_address, 3762 .ndo_tx_timeout = ql3xxx_tx_timeout, 3763 }; 3764 3765 static int ql3xxx_probe(struct pci_dev *pdev, 3766 const struct pci_device_id *pci_entry) 3767 { 3768 struct net_device *ndev = NULL; 3769 struct ql3_adapter *qdev = NULL; 3770 static int cards_found; 3771 int uninitialized_var(pci_using_dac), err; 3772 3773 err = pci_enable_device(pdev); 3774 if (err) { 3775 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3776 goto err_out; 3777 } 3778 3779 err = pci_request_regions(pdev, DRV_NAME); 3780 if (err) { 3781 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3782 goto err_out_disable_pdev; 3783 } 3784 3785 pci_set_master(pdev); 3786 3787 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3788 pci_using_dac = 1; 3789 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3790 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3791 pci_using_dac = 0; 3792 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3793 } 3794 3795 if (err) { 3796 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3797 goto err_out_free_regions; 3798 } 3799 3800 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3801 if (!ndev) { 3802 err = -ENOMEM; 3803 goto err_out_free_regions; 3804 } 3805 3806 SET_NETDEV_DEV(ndev, &pdev->dev); 3807 3808 pci_set_drvdata(pdev, ndev); 3809 3810 qdev = netdev_priv(ndev); 3811 qdev->index = cards_found; 3812 qdev->ndev = ndev; 3813 qdev->pdev = pdev; 3814 qdev->device_id = pci_entry->device; 3815 qdev->port_link_state = LS_DOWN; 3816 if (msi) 3817 qdev->msi = 1; 3818 3819 qdev->msg_enable = netif_msg_init(debug, default_msg); 3820 3821 if (pci_using_dac) 3822 ndev->features |= NETIF_F_HIGHDMA; 3823 if (qdev->device_id == QL3032_DEVICE_ID) 3824 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3825 3826 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3827 if (!qdev->mem_map_registers) { 3828 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3829 err = -EIO; 3830 goto err_out_free_ndev; 3831 } 3832 3833 spin_lock_init(&qdev->adapter_lock); 3834 spin_lock_init(&qdev->hw_lock); 3835 3836 /* Set driver entry points */ 3837 ndev->netdev_ops = &ql3xxx_netdev_ops; 3838 ndev->ethtool_ops = &ql3xxx_ethtool_ops; 3839 ndev->watchdog_timeo = 5 * HZ; 3840 3841 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3842 3843 ndev->irq = pdev->irq; 3844 3845 /* make sure the EEPROM is good */ 3846 if (ql_get_nvram_params(qdev)) { 3847 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3848 __func__, qdev->index); 3849 err = -EIO; 3850 goto err_out_iounmap; 3851 } 3852 3853 ql_set_mac_info(qdev); 3854 3855 /* Validate and set parameters */ 3856 if (qdev->mac_index) { 3857 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3858 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3859 } else { 3860 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3861 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3862 } 3863 3864 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3865 3866 /* Record PCI bus information. */ 3867 ql_get_board_info(qdev); 3868 3869 /* 3870 * Set the Maximum Memory Read Byte Count value. We do this to handle 3871 * jumbo frames. 3872 */ 3873 if (qdev->pci_x) 3874 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3875 3876 err = register_netdev(ndev); 3877 if (err) { 3878 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3879 goto err_out_iounmap; 3880 } 3881 3882 /* we're going to reset, so assume we have no link for now */ 3883 3884 netif_carrier_off(ndev); 3885 netif_stop_queue(ndev); 3886 3887 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3888 if (!qdev->workqueue) { 3889 unregister_netdev(ndev); 3890 err = -ENOMEM; 3891 goto err_out_iounmap; 3892 } 3893 3894 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3895 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3896 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3897 3898 timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0); 3899 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3900 3901 if (!cards_found) { 3902 pr_alert("%s\n", DRV_STRING); 3903 pr_alert("Driver name: %s, Version: %s\n", 3904 DRV_NAME, DRV_VERSION); 3905 } 3906 ql_display_dev_info(ndev); 3907 3908 cards_found++; 3909 return 0; 3910 3911 err_out_iounmap: 3912 iounmap(qdev->mem_map_registers); 3913 err_out_free_ndev: 3914 free_netdev(ndev); 3915 err_out_free_regions: 3916 pci_release_regions(pdev); 3917 err_out_disable_pdev: 3918 pci_disable_device(pdev); 3919 err_out: 3920 return err; 3921 } 3922 3923 static void ql3xxx_remove(struct pci_dev *pdev) 3924 { 3925 struct net_device *ndev = pci_get_drvdata(pdev); 3926 struct ql3_adapter *qdev = netdev_priv(ndev); 3927 3928 unregister_netdev(ndev); 3929 3930 ql_disable_interrupts(qdev); 3931 3932 if (qdev->workqueue) { 3933 cancel_delayed_work(&qdev->reset_work); 3934 cancel_delayed_work(&qdev->tx_timeout_work); 3935 destroy_workqueue(qdev->workqueue); 3936 qdev->workqueue = NULL; 3937 } 3938 3939 iounmap(qdev->mem_map_registers); 3940 pci_release_regions(pdev); 3941 free_netdev(ndev); 3942 } 3943 3944 static struct pci_driver ql3xxx_driver = { 3945 3946 .name = DRV_NAME, 3947 .id_table = ql3xxx_pci_tbl, 3948 .probe = ql3xxx_probe, 3949 .remove = ql3xxx_remove, 3950 }; 3951 3952 module_pci_driver(ql3xxx_driver); 3953