// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE	phyDevice;
	const u32	phyIdOUI;
	const u16	phyIdModel;
	const char	*name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};


/*
 * Caller must take hw_lock.
 */
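/*
 * Hardware semaphore helpers: a lock attempt writes (sem_mask | sem_bits)
 * to the semaphore register and reads it back; the lock was granted only
 * if the bits selected by (sem_mask >> 16) read back as sem_bits.
 * ql_sem_spinlock() retries once a second for up to three seconds,
 * ql_sem_lock() tries exactly once, and ql_sem_unlock() writes the mask
 * alone to drop the lock.
 */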
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}
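
/*
 * ql_write_page0_reg()/ql_write_page1_reg()/ql_write_page2_reg() below,
 * like the page0 readers above, switch the ISP register page on demand;
 * qdev->current_page caches the last selected page so accesses to the
 * current page skip the extra write to ispControlStatus.
 */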

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
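/*
 * FM93C56A serial EEPROM access is bit-banged through the serial port
 * interface register: fm93c56a_select()/fm93c56a_deselect() drive chip
 * select, fm93c56a_cmd() shifts out the opcode and address one bit per
 * clock rise/fall pair, and fm93c56a_datain() clocks the result back in.
 */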
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);

}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
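/*
 * ql_link_down_detect() samples the per-port link-down bit in
 * ispControlStatus; ql_link_down_detect_clear() clears it by writing the
 * bit together with its enable mask in the upper halfword.
 */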
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 2 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		fallthrough;

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 2 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
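/* With the DIV28 selection below, MDC runs at 125 MHz / 28, roughly 4.46 MHz. */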
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)			\

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_link_ksettings(struct net_device *ndev,
				 struct ethtool_link_ksettings *cmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		cmd->base.port = PORT_FIBRE;
	} else {
		cmd->base.port = PORT_TP;
		cmd->base.phy_address = qdev->PHYAddr;
	}
	advertising = ql_supported_modes(qdev);
	cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
	cmd->base.speed = ql_get_speed(qdev);
	cmd->base.duplex = ql_get_full_dup(qdev);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	u32 reg;
	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
	.get_link_ksettings = ql_get_link_ksettings,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}


				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
 */
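/*
 * Producer-index updates for the receive buffer queues are batched:
 * indices are only pushed to the chip once enough buffers have been
 * released, and for the large buffer queue a wmb() orders the
 * queue-entry writes ahead of the producer-index write.
 */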
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel_relaxed(qdev->small_buf_q_producer_index,
			       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

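/*
 * ql_get_sbuf()/ql_get_lbuf() consume one small/large receive buffer per
 * inbound IOCB and advance the corresponding release counters, which in
 * turn drive the batched producer-index updates above.
 */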
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	napi_gro_receive(&qdev->napi, skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */

	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
*/ 2070 pci_unmap_single(qdev->pdev, 2071 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2072 dma_unmap_len(lrg_buf_cb2, maplen), 2073 PCI_DMA_FROMDEVICE); 2074 prefetch(skb2->data); 2075 2076 skb_checksum_none_assert(skb2); 2077 if (qdev->device_id == QL3022_DEVICE_ID) { 2078 /* 2079 * Copy the ethhdr from first buffer to second. This 2080 * is necessary for 3022 IP completions. 2081 */ 2082 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2083 skb_push(skb2, size), size); 2084 } else { 2085 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2086 if (checksum & 2087 (IB_IP_IOCB_RSP_3032_ICE | 2088 IB_IP_IOCB_RSP_3032_CE)) { 2089 netdev_err(ndev, 2090 "%s: Bad checksum for this %s packet, checksum = %x\n", 2091 __func__, 2092 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2093 "TCP" : "UDP"), checksum); 2094 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2095 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2096 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2097 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2098 } 2099 } 2100 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2101 2102 napi_gro_receive(&qdev->napi, skb2); 2103 ndev->stats.rx_packets++; 2104 ndev->stats.rx_bytes += length; 2105 lrg_buf_cb2->skb = NULL; 2106 2107 if (qdev->device_id == QL3022_DEVICE_ID) 2108 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2109 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2110 } 2111 2112 static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) 2113 { 2114 struct net_rsp_iocb *net_rsp; 2115 struct net_device *ndev = qdev->ndev; 2116 int work_done = 0; 2117 2118 /* While there are entries in the completion queue. */ 2119 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2120 qdev->rsp_consumer_index) && (work_done < budget)) { 2121 2122 net_rsp = qdev->rsp_current; 2123 rmb(); 2124 /* 2125 * Fix 4032 chip's undocumented "feature" where bit-8 is set 2126 * if the inbound completion is for a VLAN. 
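 * Masking the opcode with 0x7f below clears that bit so the switch
 * statement matches the base opcode values.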
2127 */ 2128 if (qdev->device_id == QL3032_DEVICE_ID) 2129 net_rsp->opcode &= 0x7f; 2130 switch (net_rsp->opcode) { 2131 2132 case OPCODE_OB_MAC_IOCB_FN0: 2133 case OPCODE_OB_MAC_IOCB_FN2: 2134 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) 2135 net_rsp); 2136 break; 2137 2138 case OPCODE_IB_MAC_IOCB: 2139 case OPCODE_IB_3032_MAC_IOCB: 2140 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 2141 net_rsp); 2142 work_done++; 2143 break; 2144 2145 case OPCODE_IB_IP_IOCB: 2146 case OPCODE_IB_3032_IP_IOCB: 2147 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 2148 net_rsp); 2149 work_done++; 2150 break; 2151 default: { 2152 u32 *tmp = (u32 *)net_rsp; 2153 netdev_err(ndev, 2154 "Hit default case, not handled!\n" 2155 " dropping the packet, opcode = %x\n" 2156 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2157 net_rsp->opcode, 2158 (unsigned long int)tmp[0], 2159 (unsigned long int)tmp[1], 2160 (unsigned long int)tmp[2], 2161 (unsigned long int)tmp[3]); 2162 } 2163 } 2164 2165 qdev->rsp_consumer_index++; 2166 2167 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { 2168 qdev->rsp_consumer_index = 0; 2169 qdev->rsp_current = qdev->rsp_q_virt_addr; 2170 } else { 2171 qdev->rsp_current++; 2172 } 2173 2174 } 2175 2176 return work_done; 2177 } 2178 2179 static int ql_poll(struct napi_struct *napi, int budget) 2180 { 2181 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2182 struct ql3xxx_port_registers __iomem *port_regs = 2183 qdev->mem_map_registers; 2184 int work_done; 2185 2186 work_done = ql_tx_rx_clean(qdev, budget); 2187 2188 if (work_done < budget && napi_complete_done(napi, work_done)) { 2189 unsigned long flags; 2190 2191 spin_lock_irqsave(&qdev->hw_lock, flags); 2192 ql_update_small_bufq_prod_index(qdev); 2193 ql_update_lrg_bufq_prod_index(qdev); 2194 writel(qdev->rsp_consumer_index, 2195 &port_regs->CommonRegs.rspQConsumerIndex); 2196 spin_unlock_irqrestore(&qdev->hw_lock, flags); 2197 2198 ql_enable_interrupts(qdev); 2199 } 2200 return work_done; 2201 } 2202 2203 static irqreturn_t ql3xxx_isr(int irq, void *dev_id) 2204 { 2205 2206 struct net_device *ndev = dev_id; 2207 struct ql3_adapter *qdev = netdev_priv(ndev); 2208 struct ql3xxx_port_registers __iomem *port_regs = 2209 qdev->mem_map_registers; 2210 u32 value; 2211 int handled = 1; 2212 u32 var; 2213 2214 value = ql_read_common_reg_l(qdev, 2215 &port_regs->CommonRegs.ispControlStatus); 2216 2217 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2218 spin_lock(&qdev->adapter_lock); 2219 netif_stop_queue(qdev->ndev); 2220 netif_carrier_off(qdev->ndev); 2221 ql_disable_interrupts(qdev); 2222 qdev->port_link_state = LS_DOWN; 2223 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; 2224 2225 if (value & ISP_CONTROL_FE) { 2226 /* 2227 * Chip Fatal Error. 2228 */ 2229 var = 2230 ql_read_page0_reg_l(qdev, 2231 &port_regs->PortFatalErrStatus); 2232 netdev_warn(ndev, 2233 "Resetting chip. PortFatalErrStatus register = 0x%x\n", 2234 var); 2235 set_bit(QL_RESET_START, &qdev->flags) ; 2236 } else { 2237 /* 2238 * Soft Reset Requested. 2239 */ 2240 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; 2241 netdev_err(ndev, 2242 "Another function issued a reset to the chip. 
ISR value = %x\n", 2243 value); 2244 } 2245 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2246 spin_unlock(&qdev->adapter_lock); 2247 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2248 ql_disable_interrupts(qdev); 2249 if (likely(napi_schedule_prep(&qdev->napi))) 2250 __napi_schedule(&qdev->napi); 2251 } else 2252 return IRQ_NONE; 2253 2254 return IRQ_RETVAL(handled); 2255 } 2256 2257 /* 2258 * Get the total number of segments needed for the given number of fragments. 2259 * This is necessary because outbound address lists (OAL) will be used when 2260 * more than two frags are given. Each address list has 5 addr/len pairs. 2261 * The 5th pair in each OAL is used to point to the next OAL if more frags 2262 * are coming. That is why the frags:segment count ratio is not linear. 2263 */ 2264 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2265 { 2266 if (qdev->device_id == QL3022_DEVICE_ID) 2267 return 1; 2268 2269 if (frags <= 2) 2270 return frags + 1; 2271 else if (frags <= 6) 2272 return frags + 2; 2273 else if (frags <= 10) 2274 return frags + 3; 2275 else if (frags <= 14) 2276 return frags + 4; 2277 else if (frags <= 18) 2278 return frags + 5; 2279 return -1; 2280 } 2281 2282 static void ql_hw_csum_setup(const struct sk_buff *skb, 2283 struct ob_mac_iocb_req *mac_iocb_ptr) 2284 { 2285 const struct iphdr *ip = ip_hdr(skb); 2286 2287 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2288 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2289 2290 if (ip->protocol == IPPROTO_TCP) { 2291 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2292 OB_3032MAC_IOCB_REQ_IC; 2293 } else { 2294 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2295 OB_3032MAC_IOCB_REQ_IC; 2296 } 2297 2298 } 2299 2300 /* 2301 * Map the buffers for this transmit. 2302 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2303 */ 2304 static int ql_send_map(struct ql3_adapter *qdev, 2305 struct ob_mac_iocb_req *mac_iocb_ptr, 2306 struct ql_tx_buf_cb *tx_cb, 2307 struct sk_buff *skb) 2308 { 2309 struct oal *oal; 2310 struct oal_entry *oal_entry; 2311 int len = skb_headlen(skb); 2312 dma_addr_t map; 2313 int err; 2314 int completed_segs, i; 2315 int seg_cnt, seg = 0; 2316 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2317 2318 seg_cnt = tx_cb->seg_count; 2319 /* 2320 * Map the skb buffer first. 2321 */ 2322 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2323 2324 err = pci_dma_mapping_error(qdev->pdev, map); 2325 if (err) { 2326 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2327 err); 2328 2329 return NETDEV_TX_BUSY; 2330 } 2331 2332 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2333 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2334 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2335 oal_entry->len = cpu_to_le32(len); 2336 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2337 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2338 seg++; 2339 2340 if (seg_cnt == 1) { 2341 /* Terminate the last segment. */ 2342 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2343 return NETDEV_TX_OK; 2344 } 2345 oal = tx_cb->oal; 2346 for (completed_segs = 0; 2347 completed_segs < frag_cnt; 2348 completed_segs++, seg++) { 2349 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2350 oal_entry++; 2351 /* 2352 * Check for continuation requirements. 2353 * It's strange but necessary. 2354 * Continuation entry points to outbound address list. 
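 * The IOCB carries three addr/len pairs and each OAL carries five; the
 * last pair of each is reused as the pointer to the next OAL, which is
 * why a fresh OAL is started at segments 2, 7, 12 and 17.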
2355 */ 2356 if ((seg == 2 && seg_cnt > 3) || 2357 (seg == 7 && seg_cnt > 8) || 2358 (seg == 12 && seg_cnt > 13) || 2359 (seg == 17 && seg_cnt > 18)) { 2360 map = pci_map_single(qdev->pdev, oal, 2361 sizeof(struct oal), 2362 PCI_DMA_TODEVICE); 2363 2364 err = pci_dma_mapping_error(qdev->pdev, map); 2365 if (err) { 2366 netdev_err(qdev->ndev, 2367 "PCI mapping outbound address list with error: %d\n", 2368 err); 2369 goto map_error; 2370 } 2371 2372 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2373 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2374 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2375 OAL_CONT_ENTRY); 2376 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2377 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2378 sizeof(struct oal)); 2379 oal_entry = (struct oal_entry *)oal; 2380 oal++; 2381 seg++; 2382 } 2383 2384 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2385 DMA_TO_DEVICE); 2386 2387 err = dma_mapping_error(&qdev->pdev->dev, map); 2388 if (err) { 2389 netdev_err(qdev->ndev, 2390 "PCI mapping frags failed with error: %d\n", 2391 err); 2392 goto map_error; 2393 } 2394 2395 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2396 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2397 oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2398 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2399 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2400 } 2401 /* Terminate the last segment. */ 2402 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2403 return NETDEV_TX_OK; 2404 2405 map_error: 2406 /* A PCI mapping failed and now we will need to back out 2407 * We need to traverse through the oal's and associated pages which 2408 * have been mapped and now we must unmap them to clean up properly 2409 */ 2410 2411 seg = 1; 2412 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2413 oal = tx_cb->oal; 2414 for (i = 0; i < completed_segs; i++, seg++) { 2415 oal_entry++; 2416 2417 /* 2418 * Check for continuation requirements. 2419 * It's strange but necessary. 2420 */ 2421 2422 if ((seg == 2 && seg_cnt > 3) || 2423 (seg == 7 && seg_cnt > 8) || 2424 (seg == 12 && seg_cnt > 13) || 2425 (seg == 17 && seg_cnt > 18)) { 2426 pci_unmap_single(qdev->pdev, 2427 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2428 dma_unmap_len(&tx_cb->map[seg], maplen), 2429 PCI_DMA_TODEVICE); 2430 oal++; 2431 seg++; 2432 } 2433 2434 pci_unmap_page(qdev->pdev, 2435 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2436 dma_unmap_len(&tx_cb->map[seg], maplen), 2437 PCI_DMA_TODEVICE); 2438 } 2439 2440 pci_unmap_single(qdev->pdev, 2441 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2442 dma_unmap_addr(&tx_cb->map[0], maplen), 2443 PCI_DMA_TODEVICE); 2444 2445 return NETDEV_TX_BUSY; 2446 2447 } 2448 2449 /* 2450 * The difference between 3022 and 3032 sends: 2451 * 3022 only supports a simple single segment transmission. 2452 * 3032 supports checksumming and scatter/gather lists (fragments). 2453 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2454 * in the IOCB plus a chain of outbound address lists (OAL) that 2455 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2456 * will be used to point to an OAL when more ALP entries are required. 2457 * The IOCB is always the top of the chain followed by one or more 2458 * OALs (when necessary). 
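 * As an example of the segment math, a 10-fragment skb on the 3032
 * needs 13 segments (see ql_get_seg_count()): one for the linear head,
 * ten for the fragments and two continuation entries pointing at OALs.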
2459 */ 2460 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2461 struct net_device *ndev) 2462 { 2463 struct ql3_adapter *qdev = netdev_priv(ndev); 2464 struct ql3xxx_port_registers __iomem *port_regs = 2465 qdev->mem_map_registers; 2466 struct ql_tx_buf_cb *tx_cb; 2467 u32 tot_len = skb->len; 2468 struct ob_mac_iocb_req *mac_iocb_ptr; 2469 2470 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2471 return NETDEV_TX_BUSY; 2472 2473 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2474 tx_cb->seg_count = ql_get_seg_count(qdev, 2475 skb_shinfo(skb)->nr_frags); 2476 if (tx_cb->seg_count == -1) { 2477 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2478 return NETDEV_TX_OK; 2479 } 2480 2481 mac_iocb_ptr = tx_cb->queue_entry; 2482 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2483 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2484 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2485 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2486 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2487 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2488 tx_cb->skb = skb; 2489 if (qdev->device_id == QL3032_DEVICE_ID && 2490 skb->ip_summed == CHECKSUM_PARTIAL) 2491 ql_hw_csum_setup(skb, mac_iocb_ptr); 2492 2493 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2494 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2495 return NETDEV_TX_BUSY; 2496 } 2497 2498 wmb(); 2499 qdev->req_producer_index++; 2500 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2501 qdev->req_producer_index = 0; 2502 wmb(); 2503 ql_write_common_reg_l(qdev, 2504 &port_regs->CommonRegs.reqQProducerIndex, 2505 qdev->req_producer_index); 2506 2507 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2508 "tx queued, slot %d, len %d\n", 2509 qdev->req_producer_index, skb->len); 2510 2511 atomic_dec(&qdev->tx_count); 2512 return NETDEV_TX_OK; 2513 } 2514 2515 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2516 { 2517 qdev->req_q_size = 2518 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2519 2520 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2521 2522 /* The barrier is required to ensure request and response queue 2523 * addr writes to the registers. 
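 * (The queue base addresses themselves are programmed into the chip
 * later, in ql_adapter_initialize().)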
2524 */ 2525 wmb(); 2526 2527 qdev->req_q_virt_addr = 2528 pci_alloc_consistent(qdev->pdev, 2529 (size_t) qdev->req_q_size, 2530 &qdev->req_q_phy_addr); 2531 2532 if ((qdev->req_q_virt_addr == NULL) || 2533 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2534 netdev_err(qdev->ndev, "reqQ failed\n"); 2535 return -ENOMEM; 2536 } 2537 2538 qdev->rsp_q_virt_addr = 2539 pci_alloc_consistent(qdev->pdev, 2540 (size_t) qdev->rsp_q_size, 2541 &qdev->rsp_q_phy_addr); 2542 2543 if ((qdev->rsp_q_virt_addr == NULL) || 2544 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2545 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2546 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2547 qdev->req_q_virt_addr, 2548 qdev->req_q_phy_addr); 2549 return -ENOMEM; 2550 } 2551 2552 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2553 2554 return 0; 2555 } 2556 2557 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2558 { 2559 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2560 netdev_info(qdev->ndev, "Already done\n"); 2561 return; 2562 } 2563 2564 pci_free_consistent(qdev->pdev, 2565 qdev->req_q_size, 2566 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2567 2568 qdev->req_q_virt_addr = NULL; 2569 2570 pci_free_consistent(qdev->pdev, 2571 qdev->rsp_q_size, 2572 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2573 2574 qdev->rsp_q_virt_addr = NULL; 2575 2576 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2577 } 2578 2579 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2580 { 2581 /* Create Large Buffer Queue */ 2582 qdev->lrg_buf_q_size = 2583 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2584 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2585 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2586 else 2587 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2588 2589 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, 2590 sizeof(struct ql_rcv_buf_cb), 2591 GFP_KERNEL); 2592 if (qdev->lrg_buf == NULL) 2593 return -ENOMEM; 2594 2595 qdev->lrg_buf_q_alloc_virt_addr = 2596 pci_alloc_consistent(qdev->pdev, 2597 qdev->lrg_buf_q_alloc_size, 2598 &qdev->lrg_buf_q_alloc_phy_addr); 2599 2600 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2601 netdev_err(qdev->ndev, "lBufQ failed\n"); 2602 return -ENOMEM; 2603 } 2604 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2605 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2606 2607 /* Create Small Buffer Queue */ 2608 qdev->small_buf_q_size = 2609 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2610 if (qdev->small_buf_q_size < PAGE_SIZE) 2611 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2612 else 2613 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2614 2615 qdev->small_buf_q_alloc_virt_addr = 2616 pci_alloc_consistent(qdev->pdev, 2617 qdev->small_buf_q_alloc_size, 2618 &qdev->small_buf_q_alloc_phy_addr); 2619 2620 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2621 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); 2622 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2623 qdev->lrg_buf_q_alloc_virt_addr, 2624 qdev->lrg_buf_q_alloc_phy_addr); 2625 return -ENOMEM; 2626 } 2627 2628 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2629 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2630 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2631 return 0; 2632 } 2633 2634 static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2635 { 2636 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { 2637 netdev_info(qdev->ndev, 
"Already done\n"); 2638 return; 2639 } 2640 kfree(qdev->lrg_buf); 2641 pci_free_consistent(qdev->pdev, 2642 qdev->lrg_buf_q_alloc_size, 2643 qdev->lrg_buf_q_alloc_virt_addr, 2644 qdev->lrg_buf_q_alloc_phy_addr); 2645 2646 qdev->lrg_buf_q_virt_addr = NULL; 2647 2648 pci_free_consistent(qdev->pdev, 2649 qdev->small_buf_q_alloc_size, 2650 qdev->small_buf_q_alloc_virt_addr, 2651 qdev->small_buf_q_alloc_phy_addr); 2652 2653 qdev->small_buf_q_virt_addr = NULL; 2654 2655 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2656 } 2657 2658 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2659 { 2660 int i; 2661 struct bufq_addr_element *small_buf_q_entry; 2662 2663 /* Currently we allocate on one of memory and use it for smallbuffers */ 2664 qdev->small_buf_total_size = 2665 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2666 QL_SMALL_BUFFER_SIZE); 2667 2668 qdev->small_buf_virt_addr = 2669 pci_alloc_consistent(qdev->pdev, 2670 qdev->small_buf_total_size, 2671 &qdev->small_buf_phy_addr); 2672 2673 if (qdev->small_buf_virt_addr == NULL) { 2674 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); 2675 return -ENOMEM; 2676 } 2677 2678 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); 2679 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2680 2681 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2682 2683 /* Initialize the small buffer queue. */ 2684 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2685 small_buf_q_entry->addr_high = 2686 cpu_to_le32(qdev->small_buf_phy_addr_high); 2687 small_buf_q_entry->addr_low = 2688 cpu_to_le32(qdev->small_buf_phy_addr_low + 2689 (i * QL_SMALL_BUFFER_SIZE)); 2690 small_buf_q_entry++; 2691 } 2692 qdev->small_buf_index = 0; 2693 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2694 return 0; 2695 } 2696 2697 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2698 { 2699 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2700 netdev_info(qdev->ndev, "Already done\n"); 2701 return; 2702 } 2703 if (qdev->small_buf_virt_addr != NULL) { 2704 pci_free_consistent(qdev->pdev, 2705 qdev->small_buf_total_size, 2706 qdev->small_buf_virt_addr, 2707 qdev->small_buf_phy_addr); 2708 2709 qdev->small_buf_virt_addr = NULL; 2710 } 2711 } 2712 2713 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2714 { 2715 int i = 0; 2716 struct ql_rcv_buf_cb *lrg_buf_cb; 2717 2718 for (i = 0; i < qdev->num_large_buffers; i++) { 2719 lrg_buf_cb = &qdev->lrg_buf[i]; 2720 if (lrg_buf_cb->skb) { 2721 dev_kfree_skb(lrg_buf_cb->skb); 2722 pci_unmap_single(qdev->pdev, 2723 dma_unmap_addr(lrg_buf_cb, mapaddr), 2724 dma_unmap_len(lrg_buf_cb, maplen), 2725 PCI_DMA_FROMDEVICE); 2726 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2727 } else { 2728 break; 2729 } 2730 } 2731 } 2732 2733 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2734 { 2735 int i; 2736 struct ql_rcv_buf_cb *lrg_buf_cb; 2737 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2738 2739 for (i = 0; i < qdev->num_large_buffers; i++) { 2740 lrg_buf_cb = &qdev->lrg_buf[i]; 2741 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2742 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2743 buf_addr_ele++; 2744 } 2745 qdev->lrg_buf_index = 0; 2746 qdev->lrg_buf_skb_check = 0; 2747 } 2748 2749 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2750 { 2751 int i; 2752 struct ql_rcv_buf_cb *lrg_buf_cb; 2753 struct sk_buff *skb; 2754 dma_addr_t map; 2755 int err; 2756 2757 for (i = 0; i < 
qdev->num_large_buffers; i++) { 2758 lrg_buf_cb = &qdev->lrg_buf[i]; 2759 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2760 2761 skb = netdev_alloc_skb(qdev->ndev, 2762 qdev->lrg_buffer_len); 2763 if (unlikely(!skb)) { 2764 /* Better luck next round */ 2765 netdev_err(qdev->ndev, 2766 "large buff alloc failed for %d bytes at index %d\n", 2767 qdev->lrg_buffer_len * 2, i); 2768 ql_free_large_buffers(qdev); 2769 return -ENOMEM; 2770 } else { 2771 lrg_buf_cb->index = i; 2772 /* 2773 * We save some space to copy the ethhdr from first 2774 * buffer 2775 */ 2776 skb_reserve(skb, QL_HEADER_SPACE); 2777 map = pci_map_single(qdev->pdev, 2778 skb->data, 2779 qdev->lrg_buffer_len - 2780 QL_HEADER_SPACE, 2781 PCI_DMA_FROMDEVICE); 2782 2783 err = pci_dma_mapping_error(qdev->pdev, map); 2784 if (err) { 2785 netdev_err(qdev->ndev, 2786 "PCI mapping failed with error: %d\n", 2787 err); 2788 dev_kfree_skb_irq(skb); 2789 ql_free_large_buffers(qdev); 2790 return -ENOMEM; 2791 } 2792 2793 lrg_buf_cb->skb = skb; 2794 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2795 dma_unmap_len_set(lrg_buf_cb, maplen, 2796 qdev->lrg_buffer_len - 2797 QL_HEADER_SPACE); 2798 lrg_buf_cb->buf_phy_addr_low = 2799 cpu_to_le32(LS_64BITS(map)); 2800 lrg_buf_cb->buf_phy_addr_high = 2801 cpu_to_le32(MS_64BITS(map)); 2802 } 2803 } 2804 return 0; 2805 } 2806 2807 static void ql_free_send_free_list(struct ql3_adapter *qdev) 2808 { 2809 struct ql_tx_buf_cb *tx_cb; 2810 int i; 2811 2812 tx_cb = &qdev->tx_buf[0]; 2813 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2814 kfree(tx_cb->oal); 2815 tx_cb->oal = NULL; 2816 tx_cb++; 2817 } 2818 } 2819 2820 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2821 { 2822 struct ql_tx_buf_cb *tx_cb; 2823 int i; 2824 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2825 2826 /* Create free list of transmit buffers */ 2827 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2828 2829 tx_cb = &qdev->tx_buf[i]; 2830 tx_cb->skb = NULL; 2831 tx_cb->queue_entry = req_q_curr; 2832 req_q_curr++; 2833 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2834 if (tx_cb->oal == NULL) 2835 return -ENOMEM; 2836 } 2837 return 0; 2838 } 2839 2840 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2841 { 2842 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2843 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2844 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2845 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2846 /* 2847 * Bigger buffers, so less of them. 2848 */ 2849 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2850 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2851 } else { 2852 netdev_err(qdev->ndev, "Invalid mtu size: %d. 
Only %d and %d are accepted.\n", 2853 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2854 return -ENOMEM; 2855 } 2856 qdev->num_large_buffers = 2857 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2858 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2859 qdev->max_frame_size = 2860 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2861 2862 /* 2863 * First allocate a page of shared memory and use it for shadow 2864 * locations of Network Request Queue Consumer Address Register and 2865 * Network Completion Queue Producer Index Register 2866 */ 2867 qdev->shadow_reg_virt_addr = 2868 pci_alloc_consistent(qdev->pdev, 2869 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2870 2871 if (qdev->shadow_reg_virt_addr != NULL) { 2872 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2873 qdev->req_consumer_index_phy_addr_high = 2874 MS_64BITS(qdev->shadow_reg_phy_addr); 2875 qdev->req_consumer_index_phy_addr_low = 2876 LS_64BITS(qdev->shadow_reg_phy_addr); 2877 2878 qdev->prsp_producer_index = 2879 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2880 qdev->rsp_producer_index_phy_addr_high = 2881 qdev->req_consumer_index_phy_addr_high; 2882 qdev->rsp_producer_index_phy_addr_low = 2883 qdev->req_consumer_index_phy_addr_low + 8; 2884 } else { 2885 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2886 return -ENOMEM; 2887 } 2888 2889 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2890 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2891 goto err_req_rsp; 2892 } 2893 2894 if (ql_alloc_buffer_queues(qdev) != 0) { 2895 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2896 goto err_buffer_queues; 2897 } 2898 2899 if (ql_alloc_small_buffers(qdev) != 0) { 2900 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2901 goto err_small_buffers; 2902 } 2903 2904 if (ql_alloc_large_buffers(qdev) != 0) { 2905 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2906 goto err_small_buffers; 2907 } 2908 2909 /* Initialize the large buffer queue. 
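 * (ql_init_large_buffers() copies each receive buffer's DMA address
 * into the large buffer queue elements and clears lrg_buf_index and
 * lrg_buf_skb_check.)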
*/ 2910 ql_init_large_buffers(qdev); 2911 if (ql_create_send_free_list(qdev)) 2912 goto err_free_list; 2913 2914 qdev->rsp_current = qdev->rsp_q_virt_addr; 2915 2916 return 0; 2917 err_free_list: 2918 ql_free_send_free_list(qdev); 2919 err_small_buffers: 2920 ql_free_buffer_queues(qdev); 2921 err_buffer_queues: 2922 ql_free_net_req_rsp_queues(qdev); 2923 err_req_rsp: 2924 pci_free_consistent(qdev->pdev, 2925 PAGE_SIZE, 2926 qdev->shadow_reg_virt_addr, 2927 qdev->shadow_reg_phy_addr); 2928 2929 return -ENOMEM; 2930 } 2931 2932 static void ql_free_mem_resources(struct ql3_adapter *qdev) 2933 { 2934 ql_free_send_free_list(qdev); 2935 ql_free_large_buffers(qdev); 2936 ql_free_small_buffers(qdev); 2937 ql_free_buffer_queues(qdev); 2938 ql_free_net_req_rsp_queues(qdev); 2939 if (qdev->shadow_reg_virt_addr != NULL) { 2940 pci_free_consistent(qdev->pdev, 2941 PAGE_SIZE, 2942 qdev->shadow_reg_virt_addr, 2943 qdev->shadow_reg_phy_addr); 2944 qdev->shadow_reg_virt_addr = NULL; 2945 } 2946 } 2947 2948 static int ql_init_misc_registers(struct ql3_adapter *qdev) 2949 { 2950 struct ql3xxx_local_ram_registers __iomem *local_ram = 2951 (void __iomem *)qdev->mem_map_registers; 2952 2953 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2954 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2955 2) << 4)) 2956 return -1; 2957 2958 ql_write_page2_reg(qdev, 2959 &local_ram->bufletSize, qdev->nvram_data.bufletSize); 2960 2961 ql_write_page2_reg(qdev, 2962 &local_ram->maxBufletCount, 2963 qdev->nvram_data.bufletCount); 2964 2965 ql_write_page2_reg(qdev, 2966 &local_ram->freeBufletThresholdLow, 2967 (qdev->nvram_data.tcpWindowThreshold25 << 16) | 2968 (qdev->nvram_data.tcpWindowThreshold0)); 2969 2970 ql_write_page2_reg(qdev, 2971 &local_ram->freeBufletThresholdHigh, 2972 qdev->nvram_data.tcpWindowThreshold50); 2973 2974 ql_write_page2_reg(qdev, 2975 &local_ram->ipHashTableBase, 2976 (qdev->nvram_data.ipHashTableBaseHi << 16) | 2977 qdev->nvram_data.ipHashTableBaseLo); 2978 ql_write_page2_reg(qdev, 2979 &local_ram->ipHashTableCount, 2980 qdev->nvram_data.ipHashTableSize); 2981 ql_write_page2_reg(qdev, 2982 &local_ram->tcpHashTableBase, 2983 (qdev->nvram_data.tcpHashTableBaseHi << 16) | 2984 qdev->nvram_data.tcpHashTableBaseLo); 2985 ql_write_page2_reg(qdev, 2986 &local_ram->tcpHashTableCount, 2987 qdev->nvram_data.tcpHashTableSize); 2988 ql_write_page2_reg(qdev, 2989 &local_ram->ncbBase, 2990 (qdev->nvram_data.ncbTableBaseHi << 16) | 2991 qdev->nvram_data.ncbTableBaseLo); 2992 ql_write_page2_reg(qdev, 2993 &local_ram->maxNcbCount, 2994 qdev->nvram_data.ncbTableSize); 2995 ql_write_page2_reg(qdev, 2996 &local_ram->drbBase, 2997 (qdev->nvram_data.drbTableBaseHi << 16) | 2998 qdev->nvram_data.drbTableBaseLo); 2999 ql_write_page2_reg(qdev, 3000 &local_ram->maxDrbCount, 3001 qdev->nvram_data.drbTableSize); 3002 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); 3003 return 0; 3004 } 3005 3006 static int ql_adapter_initialize(struct ql3_adapter *qdev) 3007 { 3008 u32 value; 3009 struct ql3xxx_port_registers __iomem *port_regs = 3010 qdev->mem_map_registers; 3011 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3012 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3013 (void __iomem *)port_regs; 3014 u32 delay = 10; 3015 int status = 0; 3016 3017 if (ql_mii_setup(qdev)) 3018 return -1; 3019 3020 /* Bring out PHY out of reset */ 3021 ql_write_common_reg(qdev, spir, 3022 (ISP_SERIAL_PORT_IF_WE | 3023 (ISP_SERIAL_PORT_IF_WE << 16))); 3024 /* Give the PHY time to come out of reset. 
*/ 3025 mdelay(100); 3026 qdev->port_link_state = LS_DOWN; 3027 netif_carrier_off(qdev->ndev); 3028 3029 /* V2 chip fix for ARS-39168. */ 3030 ql_write_common_reg(qdev, spir, 3031 (ISP_SERIAL_PORT_IF_SDE | 3032 (ISP_SERIAL_PORT_IF_SDE << 16))); 3033 3034 /* Request Queue Registers */ 3035 *((u32 *)(qdev->preq_consumer_index)) = 0; 3036 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3037 qdev->req_producer_index = 0; 3038 3039 ql_write_page1_reg(qdev, 3040 &hmem_regs->reqConsumerIndexAddrHigh, 3041 qdev->req_consumer_index_phy_addr_high); 3042 ql_write_page1_reg(qdev, 3043 &hmem_regs->reqConsumerIndexAddrLow, 3044 qdev->req_consumer_index_phy_addr_low); 3045 3046 ql_write_page1_reg(qdev, 3047 &hmem_regs->reqBaseAddrHigh, 3048 MS_64BITS(qdev->req_q_phy_addr)); 3049 ql_write_page1_reg(qdev, 3050 &hmem_regs->reqBaseAddrLow, 3051 LS_64BITS(qdev->req_q_phy_addr)); 3052 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3053 3054 /* Response Queue Registers */ 3055 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3056 qdev->rsp_consumer_index = 0; 3057 qdev->rsp_current = qdev->rsp_q_virt_addr; 3058 3059 ql_write_page1_reg(qdev, 3060 &hmem_regs->rspProducerIndexAddrHigh, 3061 qdev->rsp_producer_index_phy_addr_high); 3062 3063 ql_write_page1_reg(qdev, 3064 &hmem_regs->rspProducerIndexAddrLow, 3065 qdev->rsp_producer_index_phy_addr_low); 3066 3067 ql_write_page1_reg(qdev, 3068 &hmem_regs->rspBaseAddrHigh, 3069 MS_64BITS(qdev->rsp_q_phy_addr)); 3070 3071 ql_write_page1_reg(qdev, 3072 &hmem_regs->rspBaseAddrLow, 3073 LS_64BITS(qdev->rsp_q_phy_addr)); 3074 3075 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3076 3077 /* Large Buffer Queue */ 3078 ql_write_page1_reg(qdev, 3079 &hmem_regs->rxLargeQBaseAddrHigh, 3080 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3081 3082 ql_write_page1_reg(qdev, 3083 &hmem_regs->rxLargeQBaseAddrLow, 3084 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3085 3086 ql_write_page1_reg(qdev, 3087 &hmem_regs->rxLargeQLength, 3088 qdev->num_lbufq_entries); 3089 3090 ql_write_page1_reg(qdev, 3091 &hmem_regs->rxLargeBufferLength, 3092 qdev->lrg_buffer_len); 3093 3094 /* Small Buffer Queue */ 3095 ql_write_page1_reg(qdev, 3096 &hmem_regs->rxSmallQBaseAddrHigh, 3097 MS_64BITS(qdev->small_buf_q_phy_addr)); 3098 3099 ql_write_page1_reg(qdev, 3100 &hmem_regs->rxSmallQBaseAddrLow, 3101 LS_64BITS(qdev->small_buf_q_phy_addr)); 3102 3103 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3104 ql_write_page1_reg(qdev, 3105 &hmem_regs->rxSmallBufferLength, 3106 QL_SMALL_BUFFER_SIZE); 3107 3108 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3109 qdev->small_buf_release_cnt = 8; 3110 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3111 qdev->lrg_buf_release_cnt = 8; 3112 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3113 qdev->small_buf_index = 0; 3114 qdev->lrg_buf_index = 0; 3115 qdev->lrg_buf_free_count = 0; 3116 qdev->lrg_buf_free_head = NULL; 3117 qdev->lrg_buf_free_tail = NULL; 3118 3119 ql_write_common_reg(qdev, 3120 &port_regs->CommonRegs. 3121 rxSmallQProducerIndex, 3122 qdev->small_buf_q_producer_index); 3123 ql_write_common_reg(qdev, 3124 &port_regs->CommonRegs. 3125 rxLargeQProducerIndex, 3126 qdev->lrg_buf_q_producer_index); 3127 3128 /* 3129 * Find out if the chip has already been initialized. If it has, then 3130 * we skip some of the initialization. 
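 * The PORT_STATUS_IC bit is that indicator; it is checked again near
 * the end of this routine after Configuration Complete is signalled.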
3131 */ 3132 clear_bit(QL_LINK_MASTER, &qdev->flags); 3133 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3134 if ((value & PORT_STATUS_IC) == 0) { 3135 3136 /* Chip has not been configured yet, so let it rip. */ 3137 if (ql_init_misc_registers(qdev)) { 3138 status = -1; 3139 goto out; 3140 } 3141 3142 value = qdev->nvram_data.tcpMaxWindowSize; 3143 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3144 3145 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3146 3147 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3148 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3149 * 2) << 13)) { 3150 status = -1; 3151 goto out; 3152 } 3153 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3154 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3155 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3156 16) | (INTERNAL_CHIP_SD | 3157 INTERNAL_CHIP_WE))); 3158 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3159 } 3160 3161 if (qdev->mac_index) 3162 ql_write_page0_reg(qdev, 3163 &port_regs->mac1MaxFrameLengthReg, 3164 qdev->max_frame_size); 3165 else 3166 ql_write_page0_reg(qdev, 3167 &port_regs->mac0MaxFrameLengthReg, 3168 qdev->max_frame_size); 3169 3170 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3171 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3172 2) << 7)) { 3173 status = -1; 3174 goto out; 3175 } 3176 3177 PHY_Setup(qdev); 3178 ql_init_scan_mode(qdev); 3179 ql_get_phy_owner(qdev); 3180 3181 /* Load the MAC Configuration */ 3182 3183 /* Program lower 32 bits of the MAC address */ 3184 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3185 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3186 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3187 ((qdev->ndev->dev_addr[2] << 24) 3188 | (qdev->ndev->dev_addr[3] << 16) 3189 | (qdev->ndev->dev_addr[4] << 8) 3190 | qdev->ndev->dev_addr[5])); 3191 3192 /* Program top 16 bits of the MAC address */ 3193 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3194 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3195 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3196 ((qdev->ndev->dev_addr[0] << 8) 3197 | qdev->ndev->dev_addr[1])); 3198 3199 /* Enable Primary MAC */ 3200 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3201 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3202 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3203 3204 /* Clear Primary and Secondary IP addresses */ 3205 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3206 ((IP_ADDR_INDEX_REG_MASK << 16) | 3207 (qdev->mac_index << 2))); 3208 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3209 3210 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3211 ((IP_ADDR_INDEX_REG_MASK << 16) | 3212 ((qdev->mac_index << 2) + 1))); 3213 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3214 3215 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3216 3217 /* Indicate Configuration Complete */ 3218 ql_write_page0_reg(qdev, 3219 &port_regs->portControl, 3220 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3221 3222 do { 3223 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3224 if (value & PORT_STATUS_IC) 3225 break; 3226 spin_unlock_irq(&qdev->hw_lock); 3227 msleep(500); 3228 spin_lock_irq(&qdev->hw_lock); 3229 } while (--delay); 3230 3231 if (delay == 0) { 3232 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3233 status = -1; 3234 goto out; 3235 } 3236 3237 /* Enable Ethernet Function */ 3238 if (qdev->device_id == QL3032_DEVICE_ID) { 3239 value = 3240 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 3241 QL3032_PORT_CONTROL_EIv6 | 
QL3032_PORT_CONTROL_EIv4 | 3242 QL3032_PORT_CONTROL_ET); 3243 ql_write_page0_reg(qdev, &port_regs->functionControl, 3244 ((value << 16) | value)); 3245 } else { 3246 value = 3247 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 3248 PORT_CONTROL_HH); 3249 ql_write_page0_reg(qdev, &port_regs->portControl, 3250 ((value << 16) | value)); 3251 } 3252 3253 3254 out: 3255 return status; 3256 } 3257 3258 /* 3259 * Caller holds hw_lock. 3260 */ 3261 static int ql_adapter_reset(struct ql3_adapter *qdev) 3262 { 3263 struct ql3xxx_port_registers __iomem *port_regs = 3264 qdev->mem_map_registers; 3265 int status = 0; 3266 u16 value; 3267 int max_wait_time; 3268 3269 set_bit(QL_RESET_ACTIVE, &qdev->flags); 3270 clear_bit(QL_RESET_DONE, &qdev->flags); 3271 3272 /* 3273 * Issue soft reset to chip. 3274 */ 3275 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); 3276 ql_write_common_reg(qdev, 3277 &port_regs->CommonRegs.ispControlStatus, 3278 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3279 3280 /* Wait 3 seconds for reset to complete. */ 3281 netdev_printk(KERN_DEBUG, qdev->ndev, 3282 "Wait 10 milliseconds for reset to complete\n"); 3283 3284 /* Wait until the firmware tells us the Soft Reset is done */ 3285 max_wait_time = 5; 3286 do { 3287 value = 3288 ql_read_common_reg(qdev, 3289 &port_regs->CommonRegs.ispControlStatus); 3290 if ((value & ISP_CONTROL_SR) == 0) 3291 break; 3292 3293 ssleep(1); 3294 } while ((--max_wait_time)); 3295 3296 /* 3297 * Also, make sure that the Network Reset Interrupt bit has been 3298 * cleared after the soft reset has taken place. 3299 */ 3300 value = 3301 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3302 if (value & ISP_CONTROL_RI) { 3303 netdev_printk(KERN_DEBUG, qdev->ndev, 3304 "clearing RI after reset\n"); 3305 ql_write_common_reg(qdev, 3306 &port_regs->CommonRegs. 3307 ispControlStatus, 3308 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3309 } 3310 3311 if (max_wait_time == 0) { 3312 /* Issue Force Soft Reset */ 3313 ql_write_common_reg(qdev, 3314 &port_regs->CommonRegs. 3315 ispControlStatus, 3316 ((ISP_CONTROL_FSR << 16) | 3317 ISP_CONTROL_FSR)); 3318 /* 3319 * Wait until the firmware tells us the Force Soft Reset is 3320 * done 3321 */ 3322 max_wait_time = 5; 3323 do { 3324 value = ql_read_common_reg(qdev, 3325 &port_regs->CommonRegs. 
3326 ispControlStatus); 3327 if ((value & ISP_CONTROL_FSR) == 0) 3328 break; 3329 ssleep(1); 3330 } while ((--max_wait_time)); 3331 } 3332 if (max_wait_time == 0) 3333 status = 1; 3334 3335 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3336 set_bit(QL_RESET_DONE, &qdev->flags); 3337 return status; 3338 } 3339 3340 static void ql_set_mac_info(struct ql3_adapter *qdev) 3341 { 3342 struct ql3xxx_port_registers __iomem *port_regs = 3343 qdev->mem_map_registers; 3344 u32 value, port_status; 3345 u8 func_number; 3346 3347 /* Get the function number */ 3348 value = 3349 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3350 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3351 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3352 switch (value & ISP_CONTROL_FN_MASK) { 3353 case ISP_CONTROL_FN0_NET: 3354 qdev->mac_index = 0; 3355 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3356 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3357 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3358 if (port_status & PORT_STATUS_SM0) 3359 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3360 else 3361 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3362 break; 3363 3364 case ISP_CONTROL_FN1_NET: 3365 qdev->mac_index = 1; 3366 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3367 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3368 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3369 if (port_status & PORT_STATUS_SM1) 3370 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3371 else 3372 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3373 break; 3374 3375 case ISP_CONTROL_FN0_SCSI: 3376 case ISP_CONTROL_FN1_SCSI: 3377 default: 3378 netdev_printk(KERN_DEBUG, qdev->ndev, 3379 "Invalid function number, ispControlStatus = 0x%x\n", 3380 value); 3381 break; 3382 } 3383 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3384 } 3385 3386 static void ql_display_dev_info(struct net_device *ndev) 3387 { 3388 struct ql3_adapter *qdev = netdev_priv(ndev); 3389 struct pci_dev *pdev = qdev->pdev; 3390 3391 netdev_info(ndev, 3392 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3393 DRV_NAME, qdev->index, qdev->chip_rev_id, 3394 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3395 qdev->pci_slot); 3396 netdev_info(ndev, "%s Interface\n", 3397 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3398 3399 /* 3400 * Print PCI bus width/type. 3401 */ 3402 netdev_info(ndev, "Bus interface is %s %s\n", 3403 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3404 ((qdev->pci_x) ? 
"PCI-X" : "PCI")); 3405 3406 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3407 qdev->mem_map_registers); 3408 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3409 3410 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3411 } 3412 3413 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3414 { 3415 struct net_device *ndev = qdev->ndev; 3416 int retval = 0; 3417 3418 netif_stop_queue(ndev); 3419 netif_carrier_off(ndev); 3420 3421 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3422 clear_bit(QL_LINK_MASTER, &qdev->flags); 3423 3424 ql_disable_interrupts(qdev); 3425 3426 free_irq(qdev->pdev->irq, ndev); 3427 3428 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3429 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3430 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3431 pci_disable_msi(qdev->pdev); 3432 } 3433 3434 del_timer_sync(&qdev->adapter_timer); 3435 3436 napi_disable(&qdev->napi); 3437 3438 if (do_reset) { 3439 int soft_reset; 3440 unsigned long hw_flags; 3441 3442 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3443 if (ql_wait_for_drvr_lock(qdev)) { 3444 soft_reset = ql_adapter_reset(qdev); 3445 if (soft_reset) { 3446 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3447 qdev->index); 3448 } 3449 netdev_err(ndev, 3450 "Releasing driver lock via chip reset\n"); 3451 } else { 3452 netdev_err(ndev, 3453 "Could not acquire driver lock to do reset!\n"); 3454 retval = -1; 3455 } 3456 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3457 } 3458 ql_free_mem_resources(qdev); 3459 return retval; 3460 } 3461 3462 static int ql_adapter_up(struct ql3_adapter *qdev) 3463 { 3464 struct net_device *ndev = qdev->ndev; 3465 int err; 3466 unsigned long irq_flags = IRQF_SHARED; 3467 unsigned long hw_flags; 3468 3469 if (ql_alloc_mem_resources(qdev)) { 3470 netdev_err(ndev, "Unable to allocate buffers\n"); 3471 return -ENOMEM; 3472 } 3473 3474 if (qdev->msi) { 3475 if (pci_enable_msi(qdev->pdev)) { 3476 netdev_err(ndev, 3477 "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); 3478 qdev->msi = 0; 3479 } else { 3480 netdev_info(ndev, "MSI Enabled...\n"); 3481 set_bit(QL_MSI_ENABLED, &qdev->flags); 3482 irq_flags &= ~IRQF_SHARED; 3483 } 3484 } 3485 3486 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3487 irq_flags, ndev->name, ndev); 3488 if (err) { 3489 netdev_err(ndev, 3490 "Failed to reserve interrupt %d - already in use\n", 3491 qdev->pdev->irq); 3492 goto err_irq; 3493 } 3494 3495 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3496 3497 err = ql_wait_for_drvr_lock(qdev); 3498 if (err) { 3499 err = ql_adapter_initialize(qdev); 3500 if (err) { 3501 netdev_err(ndev, "Unable to initialize adapter\n"); 3502 goto err_init; 3503 } 3504 netdev_err(ndev, "Releasing driver lock\n"); 3505 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3506 } else { 3507 netdev_err(ndev, "Could not acquire driver lock\n"); 3508 goto err_lock; 3509 } 3510 3511 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3512 3513 set_bit(QL_ADAPTER_UP, &qdev->flags); 3514 3515 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3516 3517 napi_enable(&qdev->napi); 3518 ql_enable_interrupts(qdev); 3519 return 0; 3520 3521 err_init: 3522 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3523 err_lock: 3524 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3525 free_irq(qdev->pdev->irq, ndev); 3526 err_irq: 3527 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3528 netdev_info(ndev, "calling pci_disable_msi()\n"); 3529 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3530 pci_disable_msi(qdev->pdev); 3531 } 3532 return err; 3533 } 3534 3535 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3536 { 3537 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3538 netdev_err(qdev->ndev, 3539 "Driver up/down cycle failed, closing device\n"); 3540 rtnl_lock(); 3541 dev_close(qdev->ndev); 3542 rtnl_unlock(); 3543 return -1; 3544 } 3545 return 0; 3546 } 3547 3548 static int ql3xxx_close(struct net_device *ndev) 3549 { 3550 struct ql3_adapter *qdev = netdev_priv(ndev); 3551 3552 /* 3553 * Wait for device to recover from a reset. 3554 * (Rarely happens, but possible.) 
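 * QL_ADAPTER_UP is cleared by ql_adapter_down() and set again once
 * ql_adapter_up() completes, so polling it here rides out a reset cycle.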
3555 */ 3556 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3557 msleep(50); 3558 3559 ql_adapter_down(qdev, QL_DO_RESET); 3560 return 0; 3561 } 3562 3563 static int ql3xxx_open(struct net_device *ndev) 3564 { 3565 struct ql3_adapter *qdev = netdev_priv(ndev); 3566 return ql_adapter_up(qdev); 3567 } 3568 3569 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3570 { 3571 struct ql3_adapter *qdev = netdev_priv(ndev); 3572 struct ql3xxx_port_registers __iomem *port_regs = 3573 qdev->mem_map_registers; 3574 struct sockaddr *addr = p; 3575 unsigned long hw_flags; 3576 3577 if (netif_running(ndev)) 3578 return -EBUSY; 3579 3580 if (!is_valid_ether_addr(addr->sa_data)) 3581 return -EADDRNOTAVAIL; 3582 3583 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3584 3585 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3586 /* Program lower 32 bits of the MAC address */ 3587 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3588 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3589 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3590 ((ndev->dev_addr[2] << 24) | (ndev-> 3591 dev_addr[3] << 16) | 3592 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3593 3594 /* Program top 16 bits of the MAC address */ 3595 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3596 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3597 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3598 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3599 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3600 3601 return 0; 3602 } 3603 3604 static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue) 3605 { 3606 struct ql3_adapter *qdev = netdev_priv(ndev); 3607 3608 netdev_err(ndev, "Resetting...\n"); 3609 /* 3610 * Stop the queues, we've got a problem. 3611 */ 3612 netif_stop_queue(ndev); 3613 3614 /* 3615 * Wake up the worker to process this event. 3616 */ 3617 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3618 } 3619 3620 static void ql_reset_work(struct work_struct *work) 3621 { 3622 struct ql3_adapter *qdev = 3623 container_of(work, struct ql3_adapter, reset_work.work); 3624 struct net_device *ndev = qdev->ndev; 3625 u32 value; 3626 struct ql_tx_buf_cb *tx_cb; 3627 int max_wait_time, i; 3628 struct ql3xxx_port_registers __iomem *port_regs = 3629 qdev->mem_map_registers; 3630 unsigned long hw_flags; 3631 3632 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3633 clear_bit(QL_LINK_MASTER, &qdev->flags); 3634 3635 /* 3636 * Loop through the active list and return the skb. 3637 */ 3638 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3639 int j; 3640 tx_cb = &qdev->tx_buf[i]; 3641 if (tx_cb->skb) { 3642 netdev_printk(KERN_DEBUG, ndev, 3643 "Freeing lost SKB\n"); 3644 pci_unmap_single(qdev->pdev, 3645 dma_unmap_addr(&tx_cb->map[0], 3646 mapaddr), 3647 dma_unmap_len(&tx_cb->map[0], maplen), 3648 PCI_DMA_TODEVICE); 3649 for (j = 1; j < tx_cb->seg_count; j++) { 3650 pci_unmap_page(qdev->pdev, 3651 dma_unmap_addr(&tx_cb->map[j], 3652 mapaddr), 3653 dma_unmap_len(&tx_cb->map[j], 3654 maplen), 3655 PCI_DMA_TODEVICE); 3656 } 3657 dev_kfree_skb(tx_cb->skb); 3658 tx_cb->skb = NULL; 3659 } 3660 } 3661 3662 netdev_err(ndev, "Clearing NRI after reset\n"); 3663 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3664 ql_write_common_reg(qdev, 3665 &port_regs->CommonRegs. 3666 ispControlStatus, 3667 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3668 /* 3669 * Wait the for Soft Reset to Complete. 
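 * This polls ispControlStatus until ISP_CONTROL_SR clears, giving up
 * after roughly ten seconds.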
3670 */ 3671 max_wait_time = 10; 3672 do { 3673 value = ql_read_common_reg(qdev, 3674 &port_regs->CommonRegs. 3675 3676 ispControlStatus); 3677 if ((value & ISP_CONTROL_SR) == 0) { 3678 netdev_printk(KERN_DEBUG, ndev, 3679 "reset completed\n"); 3680 break; 3681 } 3682 3683 if (value & ISP_CONTROL_RI) { 3684 netdev_printk(KERN_DEBUG, ndev, 3685 "clearing NRI after reset\n"); 3686 ql_write_common_reg(qdev, 3687 &port_regs-> 3688 CommonRegs. 3689 ispControlStatus, 3690 ((ISP_CONTROL_RI << 3691 16) | ISP_CONTROL_RI)); 3692 } 3693 3694 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3695 ssleep(1); 3696 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3697 } while (--max_wait_time); 3698 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3699 3700 if (value & ISP_CONTROL_SR) { 3701 3702 /* 3703 * Set the reset flags and clear the board again. 3704 * Nothing else to do... 3705 */ 3706 netdev_err(ndev, 3707 "Timed out waiting for reset to complete\n"); 3708 netdev_err(ndev, "Do a reset\n"); 3709 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3710 clear_bit(QL_RESET_START, &qdev->flags); 3711 ql_cycle_adapter(qdev, QL_DO_RESET); 3712 return; 3713 } 3714 3715 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3716 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3717 clear_bit(QL_RESET_START, &qdev->flags); 3718 ql_cycle_adapter(qdev, QL_NO_RESET); 3719 } 3720 } 3721 3722 static void ql_tx_timeout_work(struct work_struct *work) 3723 { 3724 struct ql3_adapter *qdev = 3725 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3726 3727 ql_cycle_adapter(qdev, QL_DO_RESET); 3728 } 3729 3730 static void ql_get_board_info(struct ql3_adapter *qdev) 3731 { 3732 struct ql3xxx_port_registers __iomem *port_regs = 3733 qdev->mem_map_registers; 3734 u32 value; 3735 3736 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3737 3738 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3739 if (value & PORT_STATUS_64) 3740 qdev->pci_width = 64; 3741 else 3742 qdev->pci_width = 32; 3743 if (value & PORT_STATUS_X) 3744 qdev->pci_x = 1; 3745 else 3746 qdev->pci_x = 0; 3747 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3748 } 3749 3750 static void ql3xxx_timer(struct timer_list *t) 3751 { 3752 struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer); 3753 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3754 } 3755 3756 static const struct net_device_ops ql3xxx_netdev_ops = { 3757 .ndo_open = ql3xxx_open, 3758 .ndo_start_xmit = ql3xxx_send, 3759 .ndo_stop = ql3xxx_close, 3760 .ndo_validate_addr = eth_validate_addr, 3761 .ndo_set_mac_address = ql3xxx_set_mac_address, 3762 .ndo_tx_timeout = ql3xxx_tx_timeout, 3763 }; 3764 3765 static int ql3xxx_probe(struct pci_dev *pdev, 3766 const struct pci_device_id *pci_entry) 3767 { 3768 struct net_device *ndev = NULL; 3769 struct ql3_adapter *qdev = NULL; 3770 static int cards_found; 3771 int pci_using_dac, err; 3772 3773 err = pci_enable_device(pdev); 3774 if (err) { 3775 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3776 goto err_out; 3777 } 3778 3779 err = pci_request_regions(pdev, DRV_NAME); 3780 if (err) { 3781 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3782 goto err_out_disable_pdev; 3783 } 3784 3785 pci_set_master(pdev); 3786 3787 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3788 pci_using_dac = 1; 3789 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3790 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3791 pci_using_dac = 0; 3792 err = pci_set_consistent_dma_mask(pdev, 
DMA_BIT_MASK(32)); 3793 } 3794 3795 if (err) { 3796 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3797 goto err_out_free_regions; 3798 } 3799 3800 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3801 if (!ndev) { 3802 err = -ENOMEM; 3803 goto err_out_free_regions; 3804 } 3805 3806 SET_NETDEV_DEV(ndev, &pdev->dev); 3807 3808 pci_set_drvdata(pdev, ndev); 3809 3810 qdev = netdev_priv(ndev); 3811 qdev->index = cards_found; 3812 qdev->ndev = ndev; 3813 qdev->pdev = pdev; 3814 qdev->device_id = pci_entry->device; 3815 qdev->port_link_state = LS_DOWN; 3816 if (msi) 3817 qdev->msi = 1; 3818 3819 qdev->msg_enable = netif_msg_init(debug, default_msg); 3820 3821 if (pci_using_dac) 3822 ndev->features |= NETIF_F_HIGHDMA; 3823 if (qdev->device_id == QL3032_DEVICE_ID) 3824 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3825 3826 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3827 if (!qdev->mem_map_registers) { 3828 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3829 err = -EIO; 3830 goto err_out_free_ndev; 3831 } 3832 3833 spin_lock_init(&qdev->adapter_lock); 3834 spin_lock_init(&qdev->hw_lock); 3835 3836 /* Set driver entry points */ 3837 ndev->netdev_ops = &ql3xxx_netdev_ops; 3838 ndev->ethtool_ops = &ql3xxx_ethtool_ops; 3839 ndev->watchdog_timeo = 5 * HZ; 3840 3841 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3842 3843 ndev->irq = pdev->irq; 3844 3845 /* make sure the EEPROM is good */ 3846 if (ql_get_nvram_params(qdev)) { 3847 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3848 __func__, qdev->index); 3849 err = -EIO; 3850 goto err_out_iounmap; 3851 } 3852 3853 ql_set_mac_info(qdev); 3854 3855 /* Validate and set parameters */ 3856 if (qdev->mac_index) { 3857 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3858 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3859 } else { 3860 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3861 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3862 } 3863 3864 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3865 3866 /* Record PCI bus information. */ 3867 ql_get_board_info(qdev); 3868 3869 /* 3870 * Set the Maximum Memory Read Byte Count value. We do this to handle 3871 * jumbo frames. 
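 * (The write below is issued only when the adapter is operating in
 * PCI-X mode; see the qdev->pci_x check.)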
3872 */ 3873 if (qdev->pci_x) 3874 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3875 3876 err = register_netdev(ndev); 3877 if (err) { 3878 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3879 goto err_out_iounmap; 3880 } 3881 3882 /* we're going to reset, so assume we have no link for now */ 3883 3884 netif_carrier_off(ndev); 3885 netif_stop_queue(ndev); 3886 3887 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3888 if (!qdev->workqueue) { 3889 unregister_netdev(ndev); 3890 err = -ENOMEM; 3891 goto err_out_iounmap; 3892 } 3893 3894 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3895 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3896 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3897 3898 timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0); 3899 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3900 3901 if (!cards_found) { 3902 pr_alert("%s\n", DRV_STRING); 3903 pr_alert("Driver name: %s, Version: %s\n", 3904 DRV_NAME, DRV_VERSION); 3905 } 3906 ql_display_dev_info(ndev); 3907 3908 cards_found++; 3909 return 0; 3910 3911 err_out_iounmap: 3912 iounmap(qdev->mem_map_registers); 3913 err_out_free_ndev: 3914 free_netdev(ndev); 3915 err_out_free_regions: 3916 pci_release_regions(pdev); 3917 err_out_disable_pdev: 3918 pci_disable_device(pdev); 3919 err_out: 3920 return err; 3921 } 3922 3923 static void ql3xxx_remove(struct pci_dev *pdev) 3924 { 3925 struct net_device *ndev = pci_get_drvdata(pdev); 3926 struct ql3_adapter *qdev = netdev_priv(ndev); 3927 3928 unregister_netdev(ndev); 3929 3930 ql_disable_interrupts(qdev); 3931 3932 if (qdev->workqueue) { 3933 cancel_delayed_work(&qdev->reset_work); 3934 cancel_delayed_work(&qdev->tx_timeout_work); 3935 destroy_workqueue(qdev->workqueue); 3936 qdev->workqueue = NULL; 3937 } 3938 3939 iounmap(qdev->mem_map_registers); 3940 pci_release_regions(pdev); 3941 free_netdev(ndev); 3942 } 3943 3944 static struct pci_driver ql3xxx_driver = { 3945 3946 .name = DRV_NAME, 3947 .id_table = ql3xxx_pci_tbl, 3948 .probe = ql3xxx_probe, 3949 .remove = ql3xxx_remove, 3950 }; 3951 3952 module_pci_driver(ql3xxx_driver); 3953