/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG					\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 *  These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE phyDevice;
	const u32 phyIdOUI;
	const u16 phyIdModel;
	const char *name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};


/*
 * Caller must take hw_lock.
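 * ql_sem_spinlock() below retries once per second for up to three
 * seconds (ssleep(1) per attempt) before giving up on the semaphore.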
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (i < 10) {
		if (i)
			ssleep(1);

		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
	}

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
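	/* Read back to flush the posted write, then allow a short settling delay */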
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, spir,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev,
			      &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);

}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16   reg1;
	u16   reg2;
	bool  agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/*  Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
PHY_ID_1_REG\n"); 991 return err; 992 } 993 994 /* Check if we have a Agere PHY */ 995 if ((reg1 == 0xffff) || (reg2 == 0xffff)) { 996 997 /* Determine which MII address we should be using 998 determined by the index of the card */ 999 if (qdev->mac_index == 0) 1000 miiAddr = MII_AGERE_ADDR_1; 1001 else 1002 miiAddr = MII_AGERE_ADDR_2; 1003 1004 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); 1005 if (err != 0) { 1006 netdev_err(qdev->ndev, 1007 "Could not read from reg PHY_ID_0_REG after Agere detected\n"); 1008 return err; 1009 } 1010 1011 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); 1012 if (err != 0) { 1013 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); 1014 return err; 1015 } 1016 1017 /* We need to remember to initialize the Agere PHY */ 1018 agereAddrChangeNeeded = true; 1019 } 1020 1021 /* Determine the particular PHY we have on board to apply 1022 PHY specific initializations */ 1023 qdev->phyType = getPhyType(qdev, reg1, reg2); 1024 1025 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { 1026 /* need this here so address gets changed */ 1027 phyAgereSpecificInit(qdev, miiAddr); 1028 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { 1029 netdev_err(qdev->ndev, "PHY is unknown\n"); 1030 return -EIO; 1031 } 1032 1033 return 0; 1034 } 1035 1036 /* 1037 * Caller holds hw_lock. 1038 */ 1039 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) 1040 { 1041 struct ql3xxx_port_registers __iomem *port_regs = 1042 qdev->mem_map_registers; 1043 u32 value; 1044 1045 if (enable) 1046 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); 1047 else 1048 value = (MAC_CONFIG_REG_PE << 16); 1049 1050 if (qdev->mac_index) 1051 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1052 else 1053 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1054 } 1055 1056 /* 1057 * Caller holds hw_lock. 1058 */ 1059 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) 1060 { 1061 struct ql3xxx_port_registers __iomem *port_regs = 1062 qdev->mem_map_registers; 1063 u32 value; 1064 1065 if (enable) 1066 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); 1067 else 1068 value = (MAC_CONFIG_REG_SR << 16); 1069 1070 if (qdev->mac_index) 1071 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1072 else 1073 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1074 } 1075 1076 /* 1077 * Caller holds hw_lock. 1078 */ 1079 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) 1080 { 1081 struct ql3xxx_port_registers __iomem *port_regs = 1082 qdev->mem_map_registers; 1083 u32 value; 1084 1085 if (enable) 1086 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); 1087 else 1088 value = (MAC_CONFIG_REG_GM << 16); 1089 1090 if (qdev->mac_index) 1091 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1092 else 1093 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1094 } 1095 1096 /* 1097 * Caller holds hw_lock. 1098 */ 1099 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) 1100 { 1101 struct ql3xxx_port_registers __iomem *port_regs = 1102 qdev->mem_map_registers; 1103 u32 value; 1104 1105 if (enable) 1106 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); 1107 else 1108 value = (MAC_CONFIG_REG_FD << 16); 1109 1110 if (qdev->mac_index) 1111 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1112 else 1113 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1114 } 1115 1116 /* 1117 * Caller holds hw_lock. 
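 * ql_mac_cfg_pause() sets or clears the TX/RX flow-control (pause) bits
 * in this port's MAC configuration register.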
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
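 * ql_link_down_detect() reports whether the link-down latch for this
 * port is set in the ISP control/status register.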
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed
					(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup
					    (qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause
					 (qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
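			 * The PHY semaphore is therefore released here
			 * first, before ql_port_start() re-acquires it.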
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 2 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 2 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
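 * ql_mii_setup() therefore programs the management clock divider
 * (125 MHz / 28) before the PHY is brought out of reset.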
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)			\

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	u32 reg;
	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg  = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}


				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
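 * ql_update_small_bufq_prod_index() only pushes the small-buffer producer
 * index out to the chip once at least 16 buffers have been released.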
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/*  Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */

	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
*/ 2071 pci_unmap_single(qdev->pdev, 2072 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2073 dma_unmap_len(lrg_buf_cb2, maplen), 2074 PCI_DMA_FROMDEVICE); 2075 prefetch(skb2->data); 2076 2077 skb_checksum_none_assert(skb2); 2078 if (qdev->device_id == QL3022_DEVICE_ID) { 2079 /* 2080 * Copy the ethhdr from first buffer to second. This 2081 * is necessary for 3022 IP completions. 2082 */ 2083 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2084 skb_push(skb2, size), size); 2085 } else { 2086 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2087 if (checksum & 2088 (IB_IP_IOCB_RSP_3032_ICE | 2089 IB_IP_IOCB_RSP_3032_CE)) { 2090 netdev_err(ndev, 2091 "%s: Bad checksum for this %s packet, checksum = %x\n", 2092 __func__, 2093 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2094 "TCP" : "UDP"), checksum); 2095 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2096 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2097 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2098 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2099 } 2100 } 2101 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2102 2103 netif_receive_skb(skb2); 2104 ndev->stats.rx_packets++; 2105 ndev->stats.rx_bytes += length; 2106 lrg_buf_cb2->skb = NULL; 2107 2108 if (qdev->device_id == QL3022_DEVICE_ID) 2109 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2110 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2111 } 2112 2113 static int ql_tx_rx_clean(struct ql3_adapter *qdev, 2114 int *tx_cleaned, int *rx_cleaned, int work_to_do) 2115 { 2116 struct net_rsp_iocb *net_rsp; 2117 struct net_device *ndev = qdev->ndev; 2118 int work_done = 0; 2119 2120 /* While there are entries in the completion queue. */ 2121 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2122 qdev->rsp_consumer_index) && (work_done < work_to_do)) { 2123 2124 net_rsp = qdev->rsp_current; 2125 rmb(); 2126 /* 2127 * Fix 4032 chip's undocumented "feature" where bit-8 is set 2128 * if the inbound completion is for a VLAN. 
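 * Masking the opcode with 0x7f below clears that bit so the switch
 * statement that follows still sees the base IOCB opcode values.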
2129 */ 2130 if (qdev->device_id == QL3032_DEVICE_ID) 2131 net_rsp->opcode &= 0x7f; 2132 switch (net_rsp->opcode) { 2133 2134 case OPCODE_OB_MAC_IOCB_FN0: 2135 case OPCODE_OB_MAC_IOCB_FN2: 2136 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) 2137 net_rsp); 2138 (*tx_cleaned)++; 2139 break; 2140 2141 case OPCODE_IB_MAC_IOCB: 2142 case OPCODE_IB_3032_MAC_IOCB: 2143 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 2144 net_rsp); 2145 (*rx_cleaned)++; 2146 break; 2147 2148 case OPCODE_IB_IP_IOCB: 2149 case OPCODE_IB_3032_IP_IOCB: 2150 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 2151 net_rsp); 2152 (*rx_cleaned)++; 2153 break; 2154 default: { 2155 u32 *tmp = (u32 *)net_rsp; 2156 netdev_err(ndev, 2157 "Hit default case, not handled!\n" 2158 " dropping the packet, opcode = %x\n" 2159 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2160 net_rsp->opcode, 2161 (unsigned long int)tmp[0], 2162 (unsigned long int)tmp[1], 2163 (unsigned long int)tmp[2], 2164 (unsigned long int)tmp[3]); 2165 } 2166 } 2167 2168 qdev->rsp_consumer_index++; 2169 2170 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { 2171 qdev->rsp_consumer_index = 0; 2172 qdev->rsp_current = qdev->rsp_q_virt_addr; 2173 } else { 2174 qdev->rsp_current++; 2175 } 2176 2177 work_done = *tx_cleaned + *rx_cleaned; 2178 } 2179 2180 return work_done; 2181 } 2182 2183 static int ql_poll(struct napi_struct *napi, int budget) 2184 { 2185 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2186 int rx_cleaned = 0, tx_cleaned = 0; 2187 unsigned long hw_flags; 2188 struct ql3xxx_port_registers __iomem *port_regs = 2189 qdev->mem_map_registers; 2190 2191 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); 2192 2193 if (tx_cleaned + rx_cleaned != budget) { 2194 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2195 __napi_complete(napi); 2196 ql_update_small_bufq_prod_index(qdev); 2197 ql_update_lrg_bufq_prod_index(qdev); 2198 writel(qdev->rsp_consumer_index, 2199 &port_regs->CommonRegs.rspQConsumerIndex); 2200 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 2201 2202 ql_enable_interrupts(qdev); 2203 } 2204 return tx_cleaned + rx_cleaned; 2205 } 2206 2207 static irqreturn_t ql3xxx_isr(int irq, void *dev_id) 2208 { 2209 2210 struct net_device *ndev = dev_id; 2211 struct ql3_adapter *qdev = netdev_priv(ndev); 2212 struct ql3xxx_port_registers __iomem *port_regs = 2213 qdev->mem_map_registers; 2214 u32 value; 2215 int handled = 1; 2216 u32 var; 2217 2218 value = ql_read_common_reg_l(qdev, 2219 &port_regs->CommonRegs.ispControlStatus); 2220 2221 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2222 spin_lock(&qdev->adapter_lock); 2223 netif_stop_queue(qdev->ndev); 2224 netif_carrier_off(qdev->ndev); 2225 ql_disable_interrupts(qdev); 2226 qdev->port_link_state = LS_DOWN; 2227 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; 2228 2229 if (value & ISP_CONTROL_FE) { 2230 /* 2231 * Chip Fatal Error. 2232 */ 2233 var = 2234 ql_read_page0_reg_l(qdev, 2235 &port_regs->PortFatalErrStatus); 2236 netdev_warn(ndev, 2237 "Resetting chip. PortFatalErrStatus register = 0x%x\n", 2238 var); 2239 set_bit(QL_RESET_START, &qdev->flags) ; 2240 } else { 2241 /* 2242 * Soft Reset Requested. 2243 */ 2244 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; 2245 netdev_err(ndev, 2246 "Another function issued a reset to the chip. 
ISR value = %x\n", 2247 value); 2248 } 2249 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2250 spin_unlock(&qdev->adapter_lock); 2251 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2252 ql_disable_interrupts(qdev); 2253 if (likely(napi_schedule_prep(&qdev->napi))) 2254 __napi_schedule(&qdev->napi); 2255 } else 2256 return IRQ_NONE; 2257 2258 return IRQ_RETVAL(handled); 2259 } 2260 2261 /* 2262 * Get the total number of segments needed for the given number of fragments. 2263 * This is necessary because outbound address lists (OAL) will be used when 2264 * more than two frags are given. Each address list has 5 addr/len pairs. 2265 * The 5th pair in each OAL is used to point to the next OAL if more frags 2266 * are coming. That is why the frags:segment count ratio is not linear. 2267 */ 2268 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2269 { 2270 if (qdev->device_id == QL3022_DEVICE_ID) 2271 return 1; 2272 2273 if (frags <= 2) 2274 return frags + 1; 2275 else if (frags <= 6) 2276 return frags + 2; 2277 else if (frags <= 10) 2278 return frags + 3; 2279 else if (frags <= 14) 2280 return frags + 4; 2281 else if (frags <= 18) 2282 return frags + 5; 2283 return -1; 2284 } 2285 2286 static void ql_hw_csum_setup(const struct sk_buff *skb, 2287 struct ob_mac_iocb_req *mac_iocb_ptr) 2288 { 2289 const struct iphdr *ip = ip_hdr(skb); 2290 2291 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2292 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2293 2294 if (ip->protocol == IPPROTO_TCP) { 2295 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2296 OB_3032MAC_IOCB_REQ_IC; 2297 } else { 2298 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2299 OB_3032MAC_IOCB_REQ_IC; 2300 } 2301 2302 } 2303 2304 /* 2305 * Map the buffers for this transmit. 2306 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2307 */ 2308 static int ql_send_map(struct ql3_adapter *qdev, 2309 struct ob_mac_iocb_req *mac_iocb_ptr, 2310 struct ql_tx_buf_cb *tx_cb, 2311 struct sk_buff *skb) 2312 { 2313 struct oal *oal; 2314 struct oal_entry *oal_entry; 2315 int len = skb_headlen(skb); 2316 dma_addr_t map; 2317 int err; 2318 int completed_segs, i; 2319 int seg_cnt, seg = 0; 2320 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2321 2322 seg_cnt = tx_cb->seg_count; 2323 /* 2324 * Map the skb buffer first. 2325 */ 2326 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2327 2328 err = pci_dma_mapping_error(qdev->pdev, map); 2329 if (err) { 2330 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2331 err); 2332 2333 return NETDEV_TX_BUSY; 2334 } 2335 2336 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2337 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2338 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2339 oal_entry->len = cpu_to_le32(len); 2340 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2341 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2342 seg++; 2343 2344 if (seg_cnt == 1) { 2345 /* Terminate the last segment. */ 2346 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2347 return NETDEV_TX_OK; 2348 } 2349 oal = tx_cb->oal; 2350 for (completed_segs = 0; 2351 completed_segs < frag_cnt; 2352 completed_segs++, seg++) { 2353 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2354 oal_entry++; 2355 /* 2356 * Check for continuation requirements. 2357 * It's strange but necessary. 2358 * Continuation entry points to outbound address list. 
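 * The IOCB body holds three addr/len pairs and each OAL holds five;
 * when the current structure is down to its last pair and more
 * fragments remain (seg 2, 7, 12 or 17 below), that pair is spent on
 * a pointer to the next OAL instead of on data.  For example, an skb
 * with four fragments needs seg_cnt == 6: the head and frag 0 use the
 * first two IOCB pairs, the third pair becomes the OAL pointer, and
 * frags 1-3 land in the first OAL.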
2359 */ 2360 if ((seg == 2 && seg_cnt > 3) || 2361 (seg == 7 && seg_cnt > 8) || 2362 (seg == 12 && seg_cnt > 13) || 2363 (seg == 17 && seg_cnt > 18)) { 2364 map = pci_map_single(qdev->pdev, oal, 2365 sizeof(struct oal), 2366 PCI_DMA_TODEVICE); 2367 2368 err = pci_dma_mapping_error(qdev->pdev, map); 2369 if (err) { 2370 netdev_err(qdev->ndev, 2371 "PCI mapping outbound address list with error: %d\n", 2372 err); 2373 goto map_error; 2374 } 2375 2376 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2377 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2378 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2379 OAL_CONT_ENTRY); 2380 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2381 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2382 sizeof(struct oal)); 2383 oal_entry = (struct oal_entry *)oal; 2384 oal++; 2385 seg++; 2386 } 2387 2388 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2389 DMA_TO_DEVICE); 2390 2391 err = dma_mapping_error(&qdev->pdev->dev, map); 2392 if (err) { 2393 netdev_err(qdev->ndev, 2394 "PCI mapping frags failed with error: %d\n", 2395 err); 2396 goto map_error; 2397 } 2398 2399 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2400 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2401 oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2402 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2403 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2404 } 2405 /* Terminate the last segment. */ 2406 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2407 return NETDEV_TX_OK; 2408 2409 map_error: 2410 /* A PCI mapping failed and now we will need to back out 2411 * We need to traverse through the oal's and associated pages which 2412 * have been mapped and now we must unmap them to clean up properly 2413 */ 2414 2415 seg = 1; 2416 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2417 oal = tx_cb->oal; 2418 for (i = 0; i < completed_segs; i++, seg++) { 2419 oal_entry++; 2420 2421 /* 2422 * Check for continuation requirements. 2423 * It's strange but necessary. 2424 */ 2425 2426 if ((seg == 2 && seg_cnt > 3) || 2427 (seg == 7 && seg_cnt > 8) || 2428 (seg == 12 && seg_cnt > 13) || 2429 (seg == 17 && seg_cnt > 18)) { 2430 pci_unmap_single(qdev->pdev, 2431 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2432 dma_unmap_len(&tx_cb->map[seg], maplen), 2433 PCI_DMA_TODEVICE); 2434 oal++; 2435 seg++; 2436 } 2437 2438 pci_unmap_page(qdev->pdev, 2439 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2440 dma_unmap_len(&tx_cb->map[seg], maplen), 2441 PCI_DMA_TODEVICE); 2442 } 2443 2444 pci_unmap_single(qdev->pdev, 2445 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2446 dma_unmap_addr(&tx_cb->map[0], maplen), 2447 PCI_DMA_TODEVICE); 2448 2449 return NETDEV_TX_BUSY; 2450 2451 } 2452 2453 /* 2454 * The difference between 3022 and 3032 sends: 2455 * 3022 only supports a simple single segment transmission. 2456 * 3032 supports checksumming and scatter/gather lists (fragments). 2457 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2458 * in the IOCB plus a chain of outbound address lists (OAL) that 2459 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2460 * will be used to point to an OAL when more ALP entries are required. 2461 * The IOCB is always the top of the chain followed by one or more 2462 * OALs (when necessary). 
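 * Roughly, a large scatter/gather send on the 3032 is chained as
 *
 *   IOCB:  [ALP][ALP][ALP -> OAL 1]
 *   OAL 1: [ALP][ALP][ALP][ALP][ALP -> OAL 2]
 *   OAL 2: [ALP][ALP] ...
 *
 * with the last pair of each full structure spent on the link rather
 * than on data.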
2463 */ 2464 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2465 struct net_device *ndev) 2466 { 2467 struct ql3_adapter *qdev = netdev_priv(ndev); 2468 struct ql3xxx_port_registers __iomem *port_regs = 2469 qdev->mem_map_registers; 2470 struct ql_tx_buf_cb *tx_cb; 2471 u32 tot_len = skb->len; 2472 struct ob_mac_iocb_req *mac_iocb_ptr; 2473 2474 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2475 return NETDEV_TX_BUSY; 2476 2477 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2478 tx_cb->seg_count = ql_get_seg_count(qdev, 2479 skb_shinfo(skb)->nr_frags); 2480 if (tx_cb->seg_count == -1) { 2481 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2482 return NETDEV_TX_OK; 2483 } 2484 2485 mac_iocb_ptr = tx_cb->queue_entry; 2486 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2487 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2488 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2489 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2490 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2491 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2492 tx_cb->skb = skb; 2493 if (qdev->device_id == QL3032_DEVICE_ID && 2494 skb->ip_summed == CHECKSUM_PARTIAL) 2495 ql_hw_csum_setup(skb, mac_iocb_ptr); 2496 2497 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2498 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2499 return NETDEV_TX_BUSY; 2500 } 2501 2502 wmb(); 2503 qdev->req_producer_index++; 2504 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2505 qdev->req_producer_index = 0; 2506 wmb(); 2507 ql_write_common_reg_l(qdev, 2508 &port_regs->CommonRegs.reqQProducerIndex, 2509 qdev->req_producer_index); 2510 2511 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2512 "tx queued, slot %d, len %d\n", 2513 qdev->req_producer_index, skb->len); 2514 2515 atomic_dec(&qdev->tx_count); 2516 return NETDEV_TX_OK; 2517 } 2518 2519 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2520 { 2521 qdev->req_q_size = 2522 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2523 2524 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2525 2526 /* The barrier is required to ensure request and response queue 2527 * addr writes to the registers. 
2528 */ 2529 wmb(); 2530 2531 qdev->req_q_virt_addr = 2532 pci_alloc_consistent(qdev->pdev, 2533 (size_t) qdev->req_q_size, 2534 &qdev->req_q_phy_addr); 2535 2536 if ((qdev->req_q_virt_addr == NULL) || 2537 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2538 netdev_err(qdev->ndev, "reqQ failed\n"); 2539 return -ENOMEM; 2540 } 2541 2542 qdev->rsp_q_virt_addr = 2543 pci_alloc_consistent(qdev->pdev, 2544 (size_t) qdev->rsp_q_size, 2545 &qdev->rsp_q_phy_addr); 2546 2547 if ((qdev->rsp_q_virt_addr == NULL) || 2548 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2549 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2550 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2551 qdev->req_q_virt_addr, 2552 qdev->req_q_phy_addr); 2553 return -ENOMEM; 2554 } 2555 2556 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2557 2558 return 0; 2559 } 2560 2561 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2562 { 2563 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2564 netdev_info(qdev->ndev, "Already done\n"); 2565 return; 2566 } 2567 2568 pci_free_consistent(qdev->pdev, 2569 qdev->req_q_size, 2570 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2571 2572 qdev->req_q_virt_addr = NULL; 2573 2574 pci_free_consistent(qdev->pdev, 2575 qdev->rsp_q_size, 2576 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2577 2578 qdev->rsp_q_virt_addr = NULL; 2579 2580 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2581 } 2582 2583 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2584 { 2585 /* Create Large Buffer Queue */ 2586 qdev->lrg_buf_q_size = 2587 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2588 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2589 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2590 else 2591 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2592 2593 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, 2594 sizeof(struct ql_rcv_buf_cb), 2595 GFP_KERNEL); 2596 if (qdev->lrg_buf == NULL) 2597 return -ENOMEM; 2598 2599 qdev->lrg_buf_q_alloc_virt_addr = 2600 pci_alloc_consistent(qdev->pdev, 2601 qdev->lrg_buf_q_alloc_size, 2602 &qdev->lrg_buf_q_alloc_phy_addr); 2603 2604 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2605 netdev_err(qdev->ndev, "lBufQ failed\n"); 2606 return -ENOMEM; 2607 } 2608 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2609 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2610 2611 /* Create Small Buffer Queue */ 2612 qdev->small_buf_q_size = 2613 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2614 if (qdev->small_buf_q_size < PAGE_SIZE) 2615 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2616 else 2617 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2618 2619 qdev->small_buf_q_alloc_virt_addr = 2620 pci_alloc_consistent(qdev->pdev, 2621 qdev->small_buf_q_alloc_size, 2622 &qdev->small_buf_q_alloc_phy_addr); 2623 2624 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2625 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); 2626 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2627 qdev->lrg_buf_q_alloc_virt_addr, 2628 qdev->lrg_buf_q_alloc_phy_addr); 2629 return -ENOMEM; 2630 } 2631 2632 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2633 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2634 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2635 return 0; 2636 } 2637 2638 static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2639 { 2640 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { 2641 netdev_info(qdev->ndev, 
"Already done\n"); 2642 return; 2643 } 2644 kfree(qdev->lrg_buf); 2645 pci_free_consistent(qdev->pdev, 2646 qdev->lrg_buf_q_alloc_size, 2647 qdev->lrg_buf_q_alloc_virt_addr, 2648 qdev->lrg_buf_q_alloc_phy_addr); 2649 2650 qdev->lrg_buf_q_virt_addr = NULL; 2651 2652 pci_free_consistent(qdev->pdev, 2653 qdev->small_buf_q_alloc_size, 2654 qdev->small_buf_q_alloc_virt_addr, 2655 qdev->small_buf_q_alloc_phy_addr); 2656 2657 qdev->small_buf_q_virt_addr = NULL; 2658 2659 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2660 } 2661 2662 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2663 { 2664 int i; 2665 struct bufq_addr_element *small_buf_q_entry; 2666 2667 /* Currently we allocate on one of memory and use it for smallbuffers */ 2668 qdev->small_buf_total_size = 2669 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2670 QL_SMALL_BUFFER_SIZE); 2671 2672 qdev->small_buf_virt_addr = 2673 pci_alloc_consistent(qdev->pdev, 2674 qdev->small_buf_total_size, 2675 &qdev->small_buf_phy_addr); 2676 2677 if (qdev->small_buf_virt_addr == NULL) { 2678 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); 2679 return -ENOMEM; 2680 } 2681 2682 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); 2683 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2684 2685 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2686 2687 /* Initialize the small buffer queue. */ 2688 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2689 small_buf_q_entry->addr_high = 2690 cpu_to_le32(qdev->small_buf_phy_addr_high); 2691 small_buf_q_entry->addr_low = 2692 cpu_to_le32(qdev->small_buf_phy_addr_low + 2693 (i * QL_SMALL_BUFFER_SIZE)); 2694 small_buf_q_entry++; 2695 } 2696 qdev->small_buf_index = 0; 2697 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2698 return 0; 2699 } 2700 2701 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2702 { 2703 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2704 netdev_info(qdev->ndev, "Already done\n"); 2705 return; 2706 } 2707 if (qdev->small_buf_virt_addr != NULL) { 2708 pci_free_consistent(qdev->pdev, 2709 qdev->small_buf_total_size, 2710 qdev->small_buf_virt_addr, 2711 qdev->small_buf_phy_addr); 2712 2713 qdev->small_buf_virt_addr = NULL; 2714 } 2715 } 2716 2717 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2718 { 2719 int i = 0; 2720 struct ql_rcv_buf_cb *lrg_buf_cb; 2721 2722 for (i = 0; i < qdev->num_large_buffers; i++) { 2723 lrg_buf_cb = &qdev->lrg_buf[i]; 2724 if (lrg_buf_cb->skb) { 2725 dev_kfree_skb(lrg_buf_cb->skb); 2726 pci_unmap_single(qdev->pdev, 2727 dma_unmap_addr(lrg_buf_cb, mapaddr), 2728 dma_unmap_len(lrg_buf_cb, maplen), 2729 PCI_DMA_FROMDEVICE); 2730 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2731 } else { 2732 break; 2733 } 2734 } 2735 } 2736 2737 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2738 { 2739 int i; 2740 struct ql_rcv_buf_cb *lrg_buf_cb; 2741 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2742 2743 for (i = 0; i < qdev->num_large_buffers; i++) { 2744 lrg_buf_cb = &qdev->lrg_buf[i]; 2745 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2746 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2747 buf_addr_ele++; 2748 } 2749 qdev->lrg_buf_index = 0; 2750 qdev->lrg_buf_skb_check = 0; 2751 } 2752 2753 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2754 { 2755 int i; 2756 struct ql_rcv_buf_cb *lrg_buf_cb; 2757 struct sk_buff *skb; 2758 dma_addr_t map; 2759 int err; 2760 2761 for (i = 0; i < 
qdev->num_large_buffers; i++) { 2762 skb = netdev_alloc_skb(qdev->ndev, 2763 qdev->lrg_buffer_len); 2764 if (unlikely(!skb)) { 2765 /* Better luck next round */ 2766 netdev_err(qdev->ndev, 2767 "large buff alloc failed for %d bytes at index %d\n", 2768 qdev->lrg_buffer_len * 2, i); 2769 ql_free_large_buffers(qdev); 2770 return -ENOMEM; 2771 } else { 2772 2773 lrg_buf_cb = &qdev->lrg_buf[i]; 2774 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2775 lrg_buf_cb->index = i; 2776 lrg_buf_cb->skb = skb; 2777 /* 2778 * We save some space to copy the ethhdr from first 2779 * buffer 2780 */ 2781 skb_reserve(skb, QL_HEADER_SPACE); 2782 map = pci_map_single(qdev->pdev, 2783 skb->data, 2784 qdev->lrg_buffer_len - 2785 QL_HEADER_SPACE, 2786 PCI_DMA_FROMDEVICE); 2787 2788 err = pci_dma_mapping_error(qdev->pdev, map); 2789 if (err) { 2790 netdev_err(qdev->ndev, 2791 "PCI mapping failed with error: %d\n", 2792 err); 2793 ql_free_large_buffers(qdev); 2794 return -ENOMEM; 2795 } 2796 2797 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2798 dma_unmap_len_set(lrg_buf_cb, maplen, 2799 qdev->lrg_buffer_len - 2800 QL_HEADER_SPACE); 2801 lrg_buf_cb->buf_phy_addr_low = 2802 cpu_to_le32(LS_64BITS(map)); 2803 lrg_buf_cb->buf_phy_addr_high = 2804 cpu_to_le32(MS_64BITS(map)); 2805 } 2806 } 2807 return 0; 2808 } 2809 2810 static void ql_free_send_free_list(struct ql3_adapter *qdev) 2811 { 2812 struct ql_tx_buf_cb *tx_cb; 2813 int i; 2814 2815 tx_cb = &qdev->tx_buf[0]; 2816 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2817 kfree(tx_cb->oal); 2818 tx_cb->oal = NULL; 2819 tx_cb++; 2820 } 2821 } 2822 2823 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2824 { 2825 struct ql_tx_buf_cb *tx_cb; 2826 int i; 2827 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2828 2829 /* Create free list of transmit buffers */ 2830 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2831 2832 tx_cb = &qdev->tx_buf[i]; 2833 tx_cb->skb = NULL; 2834 tx_cb->queue_entry = req_q_curr; 2835 req_q_curr++; 2836 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2837 if (tx_cb->oal == NULL) 2838 return -ENOMEM; 2839 } 2840 return 0; 2841 } 2842 2843 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2844 { 2845 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2846 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2847 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2848 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2849 /* 2850 * Bigger buffers, so less of them. 2851 */ 2852 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2853 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2854 } else { 2855 netdev_err(qdev->ndev, "Invalid mtu size: %d. 
Only %d and %d are accepted.\n", 2856 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2857 return -ENOMEM; 2858 } 2859 qdev->num_large_buffers = 2860 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2861 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2862 qdev->max_frame_size = 2863 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2864 2865 /* 2866 * First allocate a page of shared memory and use it for shadow 2867 * locations of Network Request Queue Consumer Address Register and 2868 * Network Completion Queue Producer Index Register 2869 */ 2870 qdev->shadow_reg_virt_addr = 2871 pci_alloc_consistent(qdev->pdev, 2872 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2873 2874 if (qdev->shadow_reg_virt_addr != NULL) { 2875 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2876 qdev->req_consumer_index_phy_addr_high = 2877 MS_64BITS(qdev->shadow_reg_phy_addr); 2878 qdev->req_consumer_index_phy_addr_low = 2879 LS_64BITS(qdev->shadow_reg_phy_addr); 2880 2881 qdev->prsp_producer_index = 2882 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2883 qdev->rsp_producer_index_phy_addr_high = 2884 qdev->req_consumer_index_phy_addr_high; 2885 qdev->rsp_producer_index_phy_addr_low = 2886 qdev->req_consumer_index_phy_addr_low + 8; 2887 } else { 2888 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2889 return -ENOMEM; 2890 } 2891 2892 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2893 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2894 goto err_req_rsp; 2895 } 2896 2897 if (ql_alloc_buffer_queues(qdev) != 0) { 2898 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2899 goto err_buffer_queues; 2900 } 2901 2902 if (ql_alloc_small_buffers(qdev) != 0) { 2903 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2904 goto err_small_buffers; 2905 } 2906 2907 if (ql_alloc_large_buffers(qdev) != 0) { 2908 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2909 goto err_small_buffers; 2910 } 2911 2912 /* Initialize the large buffer queue. 
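   ql_init_large_buffers() copies the DMA address of every pre-mapped
   receive buffer into the large buffer queue elements, so the chip can
   start filling them as soon as the producer index is written during
   adapter initialization.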
*/ 2913 ql_init_large_buffers(qdev); 2914 if (ql_create_send_free_list(qdev)) 2915 goto err_free_list; 2916 2917 qdev->rsp_current = qdev->rsp_q_virt_addr; 2918 2919 return 0; 2920 err_free_list: 2921 ql_free_send_free_list(qdev); 2922 err_small_buffers: 2923 ql_free_buffer_queues(qdev); 2924 err_buffer_queues: 2925 ql_free_net_req_rsp_queues(qdev); 2926 err_req_rsp: 2927 pci_free_consistent(qdev->pdev, 2928 PAGE_SIZE, 2929 qdev->shadow_reg_virt_addr, 2930 qdev->shadow_reg_phy_addr); 2931 2932 return -ENOMEM; 2933 } 2934 2935 static void ql_free_mem_resources(struct ql3_adapter *qdev) 2936 { 2937 ql_free_send_free_list(qdev); 2938 ql_free_large_buffers(qdev); 2939 ql_free_small_buffers(qdev); 2940 ql_free_buffer_queues(qdev); 2941 ql_free_net_req_rsp_queues(qdev); 2942 if (qdev->shadow_reg_virt_addr != NULL) { 2943 pci_free_consistent(qdev->pdev, 2944 PAGE_SIZE, 2945 qdev->shadow_reg_virt_addr, 2946 qdev->shadow_reg_phy_addr); 2947 qdev->shadow_reg_virt_addr = NULL; 2948 } 2949 } 2950 2951 static int ql_init_misc_registers(struct ql3_adapter *qdev) 2952 { 2953 struct ql3xxx_local_ram_registers __iomem *local_ram = 2954 (void __iomem *)qdev->mem_map_registers; 2955 2956 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2957 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2958 2) << 4)) 2959 return -1; 2960 2961 ql_write_page2_reg(qdev, 2962 &local_ram->bufletSize, qdev->nvram_data.bufletSize); 2963 2964 ql_write_page2_reg(qdev, 2965 &local_ram->maxBufletCount, 2966 qdev->nvram_data.bufletCount); 2967 2968 ql_write_page2_reg(qdev, 2969 &local_ram->freeBufletThresholdLow, 2970 (qdev->nvram_data.tcpWindowThreshold25 << 16) | 2971 (qdev->nvram_data.tcpWindowThreshold0)); 2972 2973 ql_write_page2_reg(qdev, 2974 &local_ram->freeBufletThresholdHigh, 2975 qdev->nvram_data.tcpWindowThreshold50); 2976 2977 ql_write_page2_reg(qdev, 2978 &local_ram->ipHashTableBase, 2979 (qdev->nvram_data.ipHashTableBaseHi << 16) | 2980 qdev->nvram_data.ipHashTableBaseLo); 2981 ql_write_page2_reg(qdev, 2982 &local_ram->ipHashTableCount, 2983 qdev->nvram_data.ipHashTableSize); 2984 ql_write_page2_reg(qdev, 2985 &local_ram->tcpHashTableBase, 2986 (qdev->nvram_data.tcpHashTableBaseHi << 16) | 2987 qdev->nvram_data.tcpHashTableBaseLo); 2988 ql_write_page2_reg(qdev, 2989 &local_ram->tcpHashTableCount, 2990 qdev->nvram_data.tcpHashTableSize); 2991 ql_write_page2_reg(qdev, 2992 &local_ram->ncbBase, 2993 (qdev->nvram_data.ncbTableBaseHi << 16) | 2994 qdev->nvram_data.ncbTableBaseLo); 2995 ql_write_page2_reg(qdev, 2996 &local_ram->maxNcbCount, 2997 qdev->nvram_data.ncbTableSize); 2998 ql_write_page2_reg(qdev, 2999 &local_ram->drbBase, 3000 (qdev->nvram_data.drbTableBaseHi << 16) | 3001 qdev->nvram_data.drbTableBaseLo); 3002 ql_write_page2_reg(qdev, 3003 &local_ram->maxDrbCount, 3004 qdev->nvram_data.drbTableSize); 3005 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); 3006 return 0; 3007 } 3008 3009 static int ql_adapter_initialize(struct ql3_adapter *qdev) 3010 { 3011 u32 value; 3012 struct ql3xxx_port_registers __iomem *port_regs = 3013 qdev->mem_map_registers; 3014 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3016 (void __iomem *)port_regs; 3017 u32 delay = 10; 3018 int status = 0; 3019 3020 if (ql_mii_setup(qdev)) 3021 return -1; 3022 3023 /* Bring out PHY out of reset */ 3024 ql_write_common_reg(qdev, spir, 3025 (ISP_SERIAL_PORT_IF_WE | 3026 (ISP_SERIAL_PORT_IF_WE << 16))); 3027 /* Give the PHY time to come out of reset. 
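   (the mdelay() below busy-waits for about 100 ms; this path only runs
   during adapter bring-up, so the stall is tolerable)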
*/ 3028 mdelay(100); 3029 qdev->port_link_state = LS_DOWN; 3030 netif_carrier_off(qdev->ndev); 3031 3032 /* V2 chip fix for ARS-39168. */ 3033 ql_write_common_reg(qdev, spir, 3034 (ISP_SERIAL_PORT_IF_SDE | 3035 (ISP_SERIAL_PORT_IF_SDE << 16))); 3036 3037 /* Request Queue Registers */ 3038 *((u32 *)(qdev->preq_consumer_index)) = 0; 3039 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3040 qdev->req_producer_index = 0; 3041 3042 ql_write_page1_reg(qdev, 3043 &hmem_regs->reqConsumerIndexAddrHigh, 3044 qdev->req_consumer_index_phy_addr_high); 3045 ql_write_page1_reg(qdev, 3046 &hmem_regs->reqConsumerIndexAddrLow, 3047 qdev->req_consumer_index_phy_addr_low); 3048 3049 ql_write_page1_reg(qdev, 3050 &hmem_regs->reqBaseAddrHigh, 3051 MS_64BITS(qdev->req_q_phy_addr)); 3052 ql_write_page1_reg(qdev, 3053 &hmem_regs->reqBaseAddrLow, 3054 LS_64BITS(qdev->req_q_phy_addr)); 3055 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3056 3057 /* Response Queue Registers */ 3058 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3059 qdev->rsp_consumer_index = 0; 3060 qdev->rsp_current = qdev->rsp_q_virt_addr; 3061 3062 ql_write_page1_reg(qdev, 3063 &hmem_regs->rspProducerIndexAddrHigh, 3064 qdev->rsp_producer_index_phy_addr_high); 3065 3066 ql_write_page1_reg(qdev, 3067 &hmem_regs->rspProducerIndexAddrLow, 3068 qdev->rsp_producer_index_phy_addr_low); 3069 3070 ql_write_page1_reg(qdev, 3071 &hmem_regs->rspBaseAddrHigh, 3072 MS_64BITS(qdev->rsp_q_phy_addr)); 3073 3074 ql_write_page1_reg(qdev, 3075 &hmem_regs->rspBaseAddrLow, 3076 LS_64BITS(qdev->rsp_q_phy_addr)); 3077 3078 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3079 3080 /* Large Buffer Queue */ 3081 ql_write_page1_reg(qdev, 3082 &hmem_regs->rxLargeQBaseAddrHigh, 3083 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3084 3085 ql_write_page1_reg(qdev, 3086 &hmem_regs->rxLargeQBaseAddrLow, 3087 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3088 3089 ql_write_page1_reg(qdev, 3090 &hmem_regs->rxLargeQLength, 3091 qdev->num_lbufq_entries); 3092 3093 ql_write_page1_reg(qdev, 3094 &hmem_regs->rxLargeBufferLength, 3095 qdev->lrg_buffer_len); 3096 3097 /* Small Buffer Queue */ 3098 ql_write_page1_reg(qdev, 3099 &hmem_regs->rxSmallQBaseAddrHigh, 3100 MS_64BITS(qdev->small_buf_q_phy_addr)); 3101 3102 ql_write_page1_reg(qdev, 3103 &hmem_regs->rxSmallQBaseAddrLow, 3104 LS_64BITS(qdev->small_buf_q_phy_addr)); 3105 3106 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3107 ql_write_page1_reg(qdev, 3108 &hmem_regs->rxSmallBufferLength, 3109 QL_SMALL_BUFFER_SIZE); 3110 3111 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3112 qdev->small_buf_release_cnt = 8; 3113 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3114 qdev->lrg_buf_release_cnt = 8; 3115 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3116 qdev->small_buf_index = 0; 3117 qdev->lrg_buf_index = 0; 3118 qdev->lrg_buf_free_count = 0; 3119 qdev->lrg_buf_free_head = NULL; 3120 qdev->lrg_buf_free_tail = NULL; 3121 3122 ql_write_common_reg(qdev, 3123 &port_regs->CommonRegs. 3124 rxSmallQProducerIndex, 3125 qdev->small_buf_q_producer_index); 3126 ql_write_common_reg(qdev, 3127 &port_regs->CommonRegs. 3128 rxLargeQProducerIndex, 3129 qdev->lrg_buf_q_producer_index); 3130 3131 /* 3132 * Find out if the chip has already been initialized. If it has, then 3133 * we skip some of the initialization. 
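 * PORT_STATUS_IC in the portStatus register is the indicator: when it
 * is clear, the NVRAM-derived buflet and hash-table settings plus the
 * external hardware configuration are programmed below; when it is
 * already set (presumably because the other function or an earlier
 * load has done it), that block is skipped.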
3134 */ 3135 clear_bit(QL_LINK_MASTER, &qdev->flags); 3136 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3137 if ((value & PORT_STATUS_IC) == 0) { 3138 3139 /* Chip has not been configured yet, so let it rip. */ 3140 if (ql_init_misc_registers(qdev)) { 3141 status = -1; 3142 goto out; 3143 } 3144 3145 value = qdev->nvram_data.tcpMaxWindowSize; 3146 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3147 3148 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3149 3150 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3151 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3152 * 2) << 13)) { 3153 status = -1; 3154 goto out; 3155 } 3156 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3157 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3158 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3159 16) | (INTERNAL_CHIP_SD | 3160 INTERNAL_CHIP_WE))); 3161 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3162 } 3163 3164 if (qdev->mac_index) 3165 ql_write_page0_reg(qdev, 3166 &port_regs->mac1MaxFrameLengthReg, 3167 qdev->max_frame_size); 3168 else 3169 ql_write_page0_reg(qdev, 3170 &port_regs->mac0MaxFrameLengthReg, 3171 qdev->max_frame_size); 3172 3173 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3174 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3175 2) << 7)) { 3176 status = -1; 3177 goto out; 3178 } 3179 3180 PHY_Setup(qdev); 3181 ql_init_scan_mode(qdev); 3182 ql_get_phy_owner(qdev); 3183 3184 /* Load the MAC Configuration */ 3185 3186 /* Program lower 32 bits of the MAC address */ 3187 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3188 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3189 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3190 ((qdev->ndev->dev_addr[2] << 24) 3191 | (qdev->ndev->dev_addr[3] << 16) 3192 | (qdev->ndev->dev_addr[4] << 8) 3193 | qdev->ndev->dev_addr[5])); 3194 3195 /* Program top 16 bits of the MAC address */ 3196 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3197 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3198 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3199 ((qdev->ndev->dev_addr[0] << 8) 3200 | qdev->ndev->dev_addr[1])); 3201 3202 /* Enable Primary MAC */ 3203 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3204 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3205 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3206 3207 /* Clear Primary and Secondary IP addresses */ 3208 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3209 ((IP_ADDR_INDEX_REG_MASK << 16) | 3210 (qdev->mac_index << 2))); 3211 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3212 3213 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3214 ((IP_ADDR_INDEX_REG_MASK << 16) | 3215 ((qdev->mac_index << 2) + 1))); 3216 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3217 3218 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3219 3220 /* Indicate Configuration Complete */ 3221 ql_write_page0_reg(qdev, 3222 &port_regs->portControl, 3223 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3224 3225 do { 3226 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3227 if (value & PORT_STATUS_IC) 3228 break; 3229 spin_unlock_irq(&qdev->hw_lock); 3230 msleep(500); 3231 spin_lock_irq(&qdev->hw_lock); 3232 } while (--delay); 3233 3234 if (delay == 0) { 3235 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3236 status = -1; 3237 goto out; 3238 } 3239 3240 /* Enable Ethernet Function */ 3241 if (qdev->device_id == QL3032_DEVICE_ID) { 3242 value = 3243 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 3244 QL3032_PORT_CONTROL_EIv6 | 
QL3032_PORT_CONTROL_EIv4 | 3245 QL3032_PORT_CONTROL_ET); 3246 ql_write_page0_reg(qdev, &port_regs->functionControl, 3247 ((value << 16) | value)); 3248 } else { 3249 value = 3250 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 3251 PORT_CONTROL_HH); 3252 ql_write_page0_reg(qdev, &port_regs->portControl, 3253 ((value << 16) | value)); 3254 } 3255 3256 3257 out: 3258 return status; 3259 } 3260 3261 /* 3262 * Caller holds hw_lock. 3263 */ 3264 static int ql_adapter_reset(struct ql3_adapter *qdev) 3265 { 3266 struct ql3xxx_port_registers __iomem *port_regs = 3267 qdev->mem_map_registers; 3268 int status = 0; 3269 u16 value; 3270 int max_wait_time; 3271 3272 set_bit(QL_RESET_ACTIVE, &qdev->flags); 3273 clear_bit(QL_RESET_DONE, &qdev->flags); 3274 3275 /* 3276 * Issue soft reset to chip. 3277 */ 3278 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); 3279 ql_write_common_reg(qdev, 3280 &port_regs->CommonRegs.ispControlStatus, 3281 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3282 3283 /* Wait up to 5 seconds for the reset to complete. */ 3284 netdev_printk(KERN_DEBUG, qdev->ndev, 3285 "Wait up to 5 seconds for reset to complete\n"); 3286 3287 /* Wait until the firmware tells us the Soft Reset is done */ 3288 max_wait_time = 5; 3289 do { 3290 value = 3291 ql_read_common_reg(qdev, 3292 &port_regs->CommonRegs.ispControlStatus); 3293 if ((value & ISP_CONTROL_SR) == 0) 3294 break; 3295 3296 ssleep(1); 3297 } while ((--max_wait_time)); 3298 3299 /* 3300 * Also, make sure that the Network Reset Interrupt bit has been 3301 * cleared after the soft reset has taken place. 3302 */ 3303 value = 3304 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3305 if (value & ISP_CONTROL_RI) { 3306 netdev_printk(KERN_DEBUG, qdev->ndev, 3307 "clearing RI after reset\n"); 3308 ql_write_common_reg(qdev, 3309 &port_regs->CommonRegs. 3310 ispControlStatus, 3311 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3312 } 3313 3314 if (max_wait_time == 0) { 3315 /* Issue Force Soft Reset */ 3316 ql_write_common_reg(qdev, 3317 &port_regs->CommonRegs. 3318 ispControlStatus, 3319 ((ISP_CONTROL_FSR << 16) | 3320 ISP_CONTROL_FSR)); 3321 /* 3322 * Wait until the firmware tells us the Force Soft Reset is 3323 * done 3324 */ 3325 max_wait_time = 5; 3326 do { 3327 value = ql_read_common_reg(qdev, 3328 &port_regs->CommonRegs.
3329 ispControlStatus); 3330 if ((value & ISP_CONTROL_FSR) == 0) 3331 break; 3332 ssleep(1); 3333 } while ((--max_wait_time)); 3334 } 3335 if (max_wait_time == 0) 3336 status = 1; 3337 3338 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3339 set_bit(QL_RESET_DONE, &qdev->flags); 3340 return status; 3341 } 3342 3343 static void ql_set_mac_info(struct ql3_adapter *qdev) 3344 { 3345 struct ql3xxx_port_registers __iomem *port_regs = 3346 qdev->mem_map_registers; 3347 u32 value, port_status; 3348 u8 func_number; 3349 3350 /* Get the function number */ 3351 value = 3352 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3353 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3354 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3355 switch (value & ISP_CONTROL_FN_MASK) { 3356 case ISP_CONTROL_FN0_NET: 3357 qdev->mac_index = 0; 3358 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3359 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3360 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3361 if (port_status & PORT_STATUS_SM0) 3362 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3363 else 3364 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3365 break; 3366 3367 case ISP_CONTROL_FN1_NET: 3368 qdev->mac_index = 1; 3369 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3370 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3371 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3372 if (port_status & PORT_STATUS_SM1) 3373 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3374 else 3375 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3376 break; 3377 3378 case ISP_CONTROL_FN0_SCSI: 3379 case ISP_CONTROL_FN1_SCSI: 3380 default: 3381 netdev_printk(KERN_DEBUG, qdev->ndev, 3382 "Invalid function number, ispControlStatus = 0x%x\n", 3383 value); 3384 break; 3385 } 3386 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3387 } 3388 3389 static void ql_display_dev_info(struct net_device *ndev) 3390 { 3391 struct ql3_adapter *qdev = netdev_priv(ndev); 3392 struct pci_dev *pdev = qdev->pdev; 3393 3394 netdev_info(ndev, 3395 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3396 DRV_NAME, qdev->index, qdev->chip_rev_id, 3397 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3398 qdev->pci_slot); 3399 netdev_info(ndev, "%s Interface\n", 3400 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3401 3402 /* 3403 * Print PCI bus width/type. 3404 */ 3405 netdev_info(ndev, "Bus interface is %s %s\n", 3406 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3407 ((qdev->pci_x) ? 
"PCI-X" : "PCI")); 3408 3409 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3410 qdev->mem_map_registers); 3411 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3412 3413 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3414 } 3415 3416 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3417 { 3418 struct net_device *ndev = qdev->ndev; 3419 int retval = 0; 3420 3421 netif_stop_queue(ndev); 3422 netif_carrier_off(ndev); 3423 3424 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3425 clear_bit(QL_LINK_MASTER, &qdev->flags); 3426 3427 ql_disable_interrupts(qdev); 3428 3429 free_irq(qdev->pdev->irq, ndev); 3430 3431 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3432 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3433 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3434 pci_disable_msi(qdev->pdev); 3435 } 3436 3437 del_timer_sync(&qdev->adapter_timer); 3438 3439 napi_disable(&qdev->napi); 3440 3441 if (do_reset) { 3442 int soft_reset; 3443 unsigned long hw_flags; 3444 3445 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3446 if (ql_wait_for_drvr_lock(qdev)) { 3447 soft_reset = ql_adapter_reset(qdev); 3448 if (soft_reset) { 3449 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3450 qdev->index); 3451 } 3452 netdev_err(ndev, 3453 "Releasing driver lock via chip reset\n"); 3454 } else { 3455 netdev_err(ndev, 3456 "Could not acquire driver lock to do reset!\n"); 3457 retval = -1; 3458 } 3459 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3460 } 3461 ql_free_mem_resources(qdev); 3462 return retval; 3463 } 3464 3465 static int ql_adapter_up(struct ql3_adapter *qdev) 3466 { 3467 struct net_device *ndev = qdev->ndev; 3468 int err; 3469 unsigned long irq_flags = IRQF_SHARED; 3470 unsigned long hw_flags; 3471 3472 if (ql_alloc_mem_resources(qdev)) { 3473 netdev_err(ndev, "Unable to allocate buffers\n"); 3474 return -ENOMEM; 3475 } 3476 3477 if (qdev->msi) { 3478 if (pci_enable_msi(qdev->pdev)) { 3479 netdev_err(ndev, 3480 "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); 3481 qdev->msi = 0; 3482 } else { 3483 netdev_info(ndev, "MSI Enabled...\n"); 3484 set_bit(QL_MSI_ENABLED, &qdev->flags); 3485 irq_flags &= ~IRQF_SHARED; 3486 } 3487 } 3488 3489 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3490 irq_flags, ndev->name, ndev); 3491 if (err) { 3492 netdev_err(ndev, 3493 "Failed to reserve interrupt %d - already in use\n", 3494 qdev->pdev->irq); 3495 goto err_irq; 3496 } 3497 3498 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3499 3500 err = ql_wait_for_drvr_lock(qdev); 3501 if (err) { 3502 err = ql_adapter_initialize(qdev); 3503 if (err) { 3504 netdev_err(ndev, "Unable to initialize adapter\n"); 3505 goto err_init; 3506 } 3507 netdev_err(ndev, "Releasing driver lock\n"); 3508 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3509 } else { 3510 netdev_err(ndev, "Could not acquire driver lock\n"); 3511 goto err_lock; 3512 } 3513 3514 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3515 3516 set_bit(QL_ADAPTER_UP, &qdev->flags); 3517 3518 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3519 3520 napi_enable(&qdev->napi); 3521 ql_enable_interrupts(qdev); 3522 return 0; 3523 3524 err_init: 3525 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3526 err_lock: 3527 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3528 free_irq(qdev->pdev->irq, ndev); 3529 err_irq: 3530 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3531 netdev_info(ndev, "calling pci_disable_msi()\n"); 3532 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3533 pci_disable_msi(qdev->pdev); 3534 } 3535 return err; 3536 } 3537 3538 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3539 { 3540 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3541 netdev_err(qdev->ndev, 3542 "Driver up/down cycle failed, closing device\n"); 3543 rtnl_lock(); 3544 dev_close(qdev->ndev); 3545 rtnl_unlock(); 3546 return -1; 3547 } 3548 return 0; 3549 } 3550 3551 static int ql3xxx_close(struct net_device *ndev) 3552 { 3553 struct ql3_adapter *qdev = netdev_priv(ndev); 3554 3555 /* 3556 * Wait for device to recover from a reset. 3557 * (Rarely happens, but possible.) 
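 * QL_ADAPTER_UP is cleared by ql_adapter_down() while a reset cycle is
 * in progress and set again by ql_adapter_up(), so poll it here before
 * tearing the interface down.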
3558 */ 3559 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3560 msleep(50); 3561 3562 ql_adapter_down(qdev, QL_DO_RESET); 3563 return 0; 3564 } 3565 3566 static int ql3xxx_open(struct net_device *ndev) 3567 { 3568 struct ql3_adapter *qdev = netdev_priv(ndev); 3569 return ql_adapter_up(qdev); 3570 } 3571 3572 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3573 { 3574 struct ql3_adapter *qdev = netdev_priv(ndev); 3575 struct ql3xxx_port_registers __iomem *port_regs = 3576 qdev->mem_map_registers; 3577 struct sockaddr *addr = p; 3578 unsigned long hw_flags; 3579 3580 if (netif_running(ndev)) 3581 return -EBUSY; 3582 3583 if (!is_valid_ether_addr(addr->sa_data)) 3584 return -EADDRNOTAVAIL; 3585 3586 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3587 3588 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3589 /* Program lower 32 bits of the MAC address */ 3590 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3591 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3592 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3593 ((ndev->dev_addr[2] << 24) | (ndev-> 3594 dev_addr[3] << 16) | 3595 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3596 3597 /* Program top 16 bits of the MAC address */ 3598 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3599 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3600 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3601 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3602 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3603 3604 return 0; 3605 } 3606 3607 static void ql3xxx_tx_timeout(struct net_device *ndev) 3608 { 3609 struct ql3_adapter *qdev = netdev_priv(ndev); 3610 3611 netdev_err(ndev, "Resetting...\n"); 3612 /* 3613 * Stop the queues, we've got a problem. 3614 */ 3615 netif_stop_queue(ndev); 3616 3617 /* 3618 * Wake up the worker to process this event. 3619 */ 3620 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3621 } 3622 3623 static void ql_reset_work(struct work_struct *work) 3624 { 3625 struct ql3_adapter *qdev = 3626 container_of(work, struct ql3_adapter, reset_work.work); 3627 struct net_device *ndev = qdev->ndev; 3628 u32 value; 3629 struct ql_tx_buf_cb *tx_cb; 3630 int max_wait_time, i; 3631 struct ql3xxx_port_registers __iomem *port_regs = 3632 qdev->mem_map_registers; 3633 unsigned long hw_flags; 3634 3635 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3636 clear_bit(QL_LINK_MASTER, &qdev->flags); 3637 3638 /* 3639 * Loop through the active list and return the skb. 3640 */ 3641 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3642 int j; 3643 tx_cb = &qdev->tx_buf[i]; 3644 if (tx_cb->skb) { 3645 netdev_printk(KERN_DEBUG, ndev, 3646 "Freeing lost SKB\n"); 3647 pci_unmap_single(qdev->pdev, 3648 dma_unmap_addr(&tx_cb->map[0], 3649 mapaddr), 3650 dma_unmap_len(&tx_cb->map[0], maplen), 3651 PCI_DMA_TODEVICE); 3652 for (j = 1; j < tx_cb->seg_count; j++) { 3653 pci_unmap_page(qdev->pdev, 3654 dma_unmap_addr(&tx_cb->map[j], 3655 mapaddr), 3656 dma_unmap_len(&tx_cb->map[j], 3657 maplen), 3658 PCI_DMA_TODEVICE); 3659 } 3660 dev_kfree_skb(tx_cb->skb); 3661 tx_cb->skb = NULL; 3662 } 3663 } 3664 3665 netdev_err(ndev, "Clearing NRI after reset\n"); 3666 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3667 ql_write_common_reg(qdev, 3668 &port_regs->CommonRegs. 3669 ispControlStatus, 3670 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3671 /* 3672 * Wait the for Soft Reset to Complete. 
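 * (up to ten one-second polls of ispControlStatus; hw_lock is dropped
 * around each ssleep() since we must not sleep with the spinlock held)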
3673 */ 3674 max_wait_time = 10; 3675 do { 3676 value = ql_read_common_reg(qdev, 3677 &port_regs->CommonRegs. 3678 3679 ispControlStatus); 3680 if ((value & ISP_CONTROL_SR) == 0) { 3681 netdev_printk(KERN_DEBUG, ndev, 3682 "reset completed\n"); 3683 break; 3684 } 3685 3686 if (value & ISP_CONTROL_RI) { 3687 netdev_printk(KERN_DEBUG, ndev, 3688 "clearing NRI after reset\n"); 3689 ql_write_common_reg(qdev, 3690 &port_regs-> 3691 CommonRegs. 3692 ispControlStatus, 3693 ((ISP_CONTROL_RI << 3694 16) | ISP_CONTROL_RI)); 3695 } 3696 3697 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3698 ssleep(1); 3699 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3700 } while (--max_wait_time); 3701 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3702 3703 if (value & ISP_CONTROL_SR) { 3704 3705 /* 3706 * Set the reset flags and clear the board again. 3707 * Nothing else to do... 3708 */ 3709 netdev_err(ndev, 3710 "Timed out waiting for reset to complete\n"); 3711 netdev_err(ndev, "Do a reset\n"); 3712 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3713 clear_bit(QL_RESET_START, &qdev->flags); 3714 ql_cycle_adapter(qdev, QL_DO_RESET); 3715 return; 3716 } 3717 3718 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3719 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3720 clear_bit(QL_RESET_START, &qdev->flags); 3721 ql_cycle_adapter(qdev, QL_NO_RESET); 3722 } 3723 } 3724 3725 static void ql_tx_timeout_work(struct work_struct *work) 3726 { 3727 struct ql3_adapter *qdev = 3728 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3729 3730 ql_cycle_adapter(qdev, QL_DO_RESET); 3731 } 3732 3733 static void ql_get_board_info(struct ql3_adapter *qdev) 3734 { 3735 struct ql3xxx_port_registers __iomem *port_regs = 3736 qdev->mem_map_registers; 3737 u32 value; 3738 3739 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3740 3741 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3742 if (value & PORT_STATUS_64) 3743 qdev->pci_width = 64; 3744 else 3745 qdev->pci_width = 32; 3746 if (value & PORT_STATUS_X) 3747 qdev->pci_x = 1; 3748 else 3749 qdev->pci_x = 0; 3750 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3751 } 3752 3753 static void ql3xxx_timer(unsigned long ptr) 3754 { 3755 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; 3756 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3757 } 3758 3759 static const struct net_device_ops ql3xxx_netdev_ops = { 3760 .ndo_open = ql3xxx_open, 3761 .ndo_start_xmit = ql3xxx_send, 3762 .ndo_stop = ql3xxx_close, 3763 .ndo_change_mtu = eth_change_mtu, 3764 .ndo_validate_addr = eth_validate_addr, 3765 .ndo_set_mac_address = ql3xxx_set_mac_address, 3766 .ndo_tx_timeout = ql3xxx_tx_timeout, 3767 }; 3768 3769 static int ql3xxx_probe(struct pci_dev *pdev, 3770 const struct pci_device_id *pci_entry) 3771 { 3772 struct net_device *ndev = NULL; 3773 struct ql3_adapter *qdev = NULL; 3774 static int cards_found; 3775 int uninitialized_var(pci_using_dac), err; 3776 3777 err = pci_enable_device(pdev); 3778 if (err) { 3779 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3780 goto err_out; 3781 } 3782 3783 err = pci_request_regions(pdev, DRV_NAME); 3784 if (err) { 3785 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3786 goto err_out_disable_pdev; 3787 } 3788 3789 pci_set_master(pdev); 3790 3791 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3792 pci_using_dac = 1; 3793 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3794 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3795 pci_using_dac = 0; 3796 err = 
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3797 } 3798 3799 if (err) { 3800 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3801 goto err_out_free_regions; 3802 } 3803 3804 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3805 if (!ndev) { 3806 err = -ENOMEM; 3807 goto err_out_free_regions; 3808 } 3809 3810 SET_NETDEV_DEV(ndev, &pdev->dev); 3811 3812 pci_set_drvdata(pdev, ndev); 3813 3814 qdev = netdev_priv(ndev); 3815 qdev->index = cards_found; 3816 qdev->ndev = ndev; 3817 qdev->pdev = pdev; 3818 qdev->device_id = pci_entry->device; 3819 qdev->port_link_state = LS_DOWN; 3820 if (msi) 3821 qdev->msi = 1; 3822 3823 qdev->msg_enable = netif_msg_init(debug, default_msg); 3824 3825 if (pci_using_dac) 3826 ndev->features |= NETIF_F_HIGHDMA; 3827 if (qdev->device_id == QL3032_DEVICE_ID) 3828 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3829 3830 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3831 if (!qdev->mem_map_registers) { 3832 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3833 err = -EIO; 3834 goto err_out_free_ndev; 3835 } 3836 3837 spin_lock_init(&qdev->adapter_lock); 3838 spin_lock_init(&qdev->hw_lock); 3839 3840 /* Set driver entry points */ 3841 ndev->netdev_ops = &ql3xxx_netdev_ops; 3842 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3843 ndev->watchdog_timeo = 5 * HZ; 3844 3845 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3846 3847 ndev->irq = pdev->irq; 3848 3849 /* make sure the EEPROM is good */ 3850 if (ql_get_nvram_params(qdev)) { 3851 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3852 __func__, qdev->index); 3853 err = -EIO; 3854 goto err_out_iounmap; 3855 } 3856 3857 ql_set_mac_info(qdev); 3858 3859 /* Validate and set parameters */ 3860 if (qdev->mac_index) { 3861 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3862 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3863 } else { 3864 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3865 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3866 } 3867 3868 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3869 3870 /* Record PCI bus information. */ 3871 ql_get_board_info(qdev); 3872 3873 /* 3874 * Set the Maximum Memory Read Byte Count value. We do this to handle 3875 * jumbo frames. 
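 * (the pci_write_config_word() below is applied only when
 * ql_get_board_info() found the adapter on a PCI-X bus; on
 * conventional PCI it is skipped)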
3876 */ 3877 if (qdev->pci_x) 3878 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3879 3880 err = register_netdev(ndev); 3881 if (err) { 3882 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3883 goto err_out_iounmap; 3884 } 3885 3886 /* we're going to reset, so assume we have no link for now */ 3887 3888 netif_carrier_off(ndev); 3889 netif_stop_queue(ndev); 3890 3891 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3892 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3893 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3894 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3895 3896 init_timer(&qdev->adapter_timer); 3897 qdev->adapter_timer.function = ql3xxx_timer; 3898 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3899 qdev->adapter_timer.data = (unsigned long)qdev; 3900 3901 if (!cards_found) { 3902 pr_alert("%s\n", DRV_STRING); 3903 pr_alert("Driver name: %s, Version: %s\n", 3904 DRV_NAME, DRV_VERSION); 3905 } 3906 ql_display_dev_info(ndev); 3907 3908 cards_found++; 3909 return 0; 3910 3911 err_out_iounmap: 3912 iounmap(qdev->mem_map_registers); 3913 err_out_free_ndev: 3914 free_netdev(ndev); 3915 err_out_free_regions: 3916 pci_release_regions(pdev); 3917 err_out_disable_pdev: 3918 pci_disable_device(pdev); 3919 err_out: 3920 return err; 3921 } 3922 3923 static void ql3xxx_remove(struct pci_dev *pdev) 3924 { 3925 struct net_device *ndev = pci_get_drvdata(pdev); 3926 struct ql3_adapter *qdev = netdev_priv(ndev); 3927 3928 unregister_netdev(ndev); 3929 3930 ql_disable_interrupts(qdev); 3931 3932 if (qdev->workqueue) { 3933 cancel_delayed_work(&qdev->reset_work); 3934 cancel_delayed_work(&qdev->tx_timeout_work); 3935 destroy_workqueue(qdev->workqueue); 3936 qdev->workqueue = NULL; 3937 } 3938 3939 iounmap(qdev->mem_map_registers); 3940 pci_release_regions(pdev); 3941 free_netdev(ndev); 3942 } 3943 3944 static struct pci_driver ql3xxx_driver = { 3945 3946 .name = DRV_NAME, 3947 .id_table = ql3xxx_pci_tbl, 3948 .probe = ql3xxx_probe, 3949 .remove = ql3xxx_remove, 3950 }; 3951 3952 module_pci_driver(ql3xxx_driver); 3953