/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"

struct crb_addr_pair {
	u32 addr;
	u32 data;
};

#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)

#define crb_addr_transform(name) \
	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
	NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20

#define NETXEN_NIC_XDMA_RESET 0x8000ff

static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring);
static int netxen_p3_has_mn(struct netxen_adapter *adapter);

static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}

void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int i, ring;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		for (i = 0; i < rds_ring->num_desc; ++i) {
			rx_buf = &(rds_ring->rx_buf_arr[i]);
			if (rx_buf->state == NETXEN_BUFFER_FREE)
				continue;
			pci_unmap_single(adapter->pdev,
					rx_buf->dma,
					rds_ring->dma_size,
					PCI_DMA_FROMDEVICE);
			if (rx_buf->skb != NULL)
				dev_kfree_skb_any(rx_buf->skb);
		}
	}
}

void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_cmd_buffer *cmd_buf;
	struct netxen_skb_frag *buffrag;
	int i, j;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;

	spin_lock_bh(&adapter->tx_clean_lock);
	cmd_buf = tx_ring->cmd_buf_arr;
	for (i = 0; i < tx_ring->num_desc; i++) {
		buffrag = cmd_buf->frag_array;
		if (buffrag->dma) {
			pci_unmap_single(adapter->pdev, buffrag->dma,
					 buffrag->length, PCI_DMA_TODEVICE);
			buffrag->dma = 0ULL;
		}
		for (j = 1; j < cmd_buf->frag_count; j++) {
			buffrag++;
			if (buffrag->dma) {
				pci_unmap_page(adapter->pdev, buffrag->dma,
					       buffrag->length,
					       PCI_DMA_TODEVICE);
				buffrag->dma = 0ULL;
			}
		}
		if (cmd_buf->skb) {
			dev_kfree_skb_any(cmd_buf->skb);
			cmd_buf->skb = NULL;
		}
		cmd_buf++;
	}
	spin_unlock_bh(&adapter->tx_clean_lock);
}

void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->rds_rings == NULL)
		goto skip_rds;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		vfree(rds_ring->rx_buf_arr);
		rds_ring->rx_buf_arr = NULL;
	}
	kfree(recv_ctx->rds_rings);

skip_rds:
	if (adapter->tx_ring == NULL)
		return;

	tx_ring = adapter->tx_ring;
	vfree(tx_ring->cmd_buf_arr);
	kfree(tx_ring);
	adapter->tx_ring = NULL;
}

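/*
 * netxen_alloc_sw_resources() - allocate host (software) ring state:
 * the TX command buffer array plus the RX descriptor rings and their
 * buffer arrays. On any failure, everything allocated so far is torn
 * down again via netxen_free_sw_resources().
 */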
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	struct netxen_rx_buffer *rx_buf;
	int ring, i;

	struct netxen_cmd_buffer *cmd_buf_arr;
	struct net_device *netdev = adapter->netdev;

	tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
	if (tx_ring == NULL)
		return -ENOMEM;

	adapter->tx_ring = tx_ring;

	tx_ring->num_desc = adapter->num_txd;
	tx_ring->txq = netdev_get_tx_queue(netdev, 0);

	cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
	if (cmd_buf_arr == NULL)
		goto err_out;

	tx_ring->cmd_buf_arr = cmd_buf_arr;

	recv_ctx = &adapter->recv_ctx;

	rds_ring = kcalloc(adapter->max_rds_rings,
			   sizeof(struct nx_host_rds_ring), GFP_KERNEL);
	if (rds_ring == NULL)
		goto err_out;

	recv_ctx->rds_rings = rds_ring;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		switch (ring) {
		case RCV_RING_NORMAL:
			rds_ring->num_desc = adapter->num_rxd;
			if (adapter->ahw.cut_through) {
				rds_ring->dma_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
				rds_ring->skb_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
			} else {
				if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
					rds_ring->dma_size =
						NX_P3_RX_BUF_MAX_LEN;
				else
					rds_ring->dma_size =
						NX_P2_RX_BUF_MAX_LEN;
				rds_ring->skb_size =
					rds_ring->dma_size + NET_IP_ALIGN;
			}
			break;

		case RCV_RING_JUMBO:
			rds_ring->num_desc = adapter->num_jumbo_rxd;
			if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
				rds_ring->dma_size =
					NX_P3_RX_JUMBO_BUF_MAX_LEN;
			else
				rds_ring->dma_size =
					NX_P2_RX_JUMBO_BUF_MAX_LEN;

			if (adapter->capabilities & NX_CAP0_HW_LRO)
				rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;

			rds_ring->skb_size =
				rds_ring->dma_size + NET_IP_ALIGN;
			break;

		case RCV_RING_LRO:
			rds_ring->num_desc = adapter->num_lro_rxd;
			rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
			rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
			break;

		}
		rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
		if (rds_ring->rx_buf_arr == NULL)
			/* free whatever was already allocated */
			goto err_out;

		INIT_LIST_HEAD(&rds_ring->free_list);
		/*
		 * Now go through all of them, set reference handles
		 * and put them in the queues.
		 */
		rx_buf = rds_ring->rx_buf_arr;
		for (i = 0; i < rds_ring->num_desc; i++) {
			list_add_tail(&rx_buf->list,
					&rds_ring->free_list);
			rx_buf->ref_handle = i;
			rx_buf->state = NETXEN_BUFFER_FREE;
			rx_buf++;
		}
		spin_lock_init(&rds_ring->lock);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sds_ring->irq = adapter->msix_entries[ring].vector;
		sds_ring->adapter = adapter;
		sds_ring->num_desc = adapter->num_rxd;

		for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
			INIT_LIST_HEAD(&sds_ring->free_list[i]);
	}

	return 0;

err_out:
	netxen_free_sw_resources(adapter);
	return -ENOMEM;
}

/*
 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
 * address to external PCI CRB address.
 */
static u32 netxen_decode_crb_addr(u32 addr)
{
	int i;
	u32 base_addr, offset, pci_base;

	crb_addr_transform_setup();

	pci_base = NETXEN_ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == NETXEN_ADDR_ERROR)
		return pci_base;
	else
		return pci_base + offset;
}

#define NETXEN_MAX_ROM_WAIT_USEC 100

static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
	long timeout = 0;
	long done = 0;

	cond_resched();

	while (done == 0) {
		done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
		done &= 2;
		if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
			dev_err(&adapter->pdev->dev,
				"Timeout reached waiting for rom done");
			return -EIO;
		}
		udelay(1);
	}
	return 0;
}

static int do_rom_fast_read(struct netxen_adapter *adapter,
			    int addr, int *valp)
{
	NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (netxen_wait_rom_done(adapter)) {
		printk("Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(10);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
	return 0;
}

static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
				  u8 *bytes, size_t size)
{
	int addridx;
	int ret = 0;

	for (addridx = addr; addridx < (addr + size); addridx += 4) {
		int v;
		ret = do_rom_fast_read(adapter, addridx, &v);
		if (ret != 0)
			break;
		*(__le32 *)bytes = cpu_to_le32(v);
		bytes += 4;
	}

	return ret;
}

int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
			   u8 *bytes, size_t size)
{
	int ret;

	ret = netxen_rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = do_rom_fast_read_words(adapter, addr, bytes, size);

	netxen_rom_unlock(adapter);
	return ret;
}

int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
	int ret;

	if (netxen_rom_lock(adapter) != 0)
		return -EIO;

	ret = do_rom_fast_read(adapter, addr, valp);
	netxen_rom_unlock(adapter);
	return ret;
}

#define NETXEN_BOARDTYPE	0x4008
#define NETXEN_BOARDNUM		0x400c
#define NETXEN_CHIPNUM		0x4010

int netxen_pinit_from_rom(struct netxen_adapter *adapter)
{
	int addr, val;
	int i, n, init_delay = 0;
	struct crb_addr_pair *buf;
	unsigned offset;
	u32 off;

	/* resetall */
	netxen_rom_lock(adapter);
	NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	netxen_rom_unlock(adapter);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			(n != 0xcafecafe) ||
			netxen_rom_fast_read(adapter, 4, &n) != 0) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = n & 0xffffU;
		n = (n >> 16) & 0xffffU;
	} else {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			!(n & 0x80000000)) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = 1;
		n &= ~0x80000000;
	}

	if (n >= 1024) {
		printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
		       " initialized.\n", __func__, n);
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;

	}

	for (i = 0; i < n; i++) {

		off = netxen_decode_crb_addr(buf[i].addr);
		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "CRB init value out of range %x\n",
					buf[i].addr);
			continue;
		}
		off += NETXEN_PCI_CRBSPACE;

		if (off & 1)
			continue;

		/* skipping cold reboot MAGIC */
		if (off == NETXEN_CAM_RAM(0x1fc))
			continue;

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
			if (off == (NETXEN_CRB_I2C0 + 0x1c))
				continue;
			/* do not reset PCI */
			if (off == (ROMUSB_GLB + 0xbc))
				continue;
			if (off == (ROMUSB_GLB + 0xa8))
				continue;
			if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
				continue;
			if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
				continue;
			if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
				continue;
			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
				!NX_IS_REVISION_P3P(adapter->ahw.revision_id))
				buf[i].data = 0x1020;
			/* skip the function enable register */
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
				continue;
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
				continue;
		}

		init_delay = 1;
		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
			init_delay = 1000;
			if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
				/* hold xdma in reset also */
				buf[i].data = NETXEN_NIC_XDMA_RESET;
				buf[i].data = 0x8000ff;
			}
		}

		NXWR32(adapter, off, buf[i].data);

		msleep(init_delay);
	}
	kfree(buf);

	/* disable_peg_cache_all */

	/* unreset_net_cache */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
	}

	/* p2dn replyCount */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
	/* disable_peg_cache 1 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr_all */

	/* peg_clr 0 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
	/* peg_clr 1 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
	/* peg_clr 2 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
	/* peg_clr 3 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
	return 0;
}

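/*
 * Walk the directory at the start of a unified ROM image and return the
 * table descriptor whose type matches @section, or NULL if not found.
 */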
static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
{
	uint32_t i;
	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {

		__le32 offs = cpu_to_le32(directory->findex) +
				(i * cpu_to_le32(directory->entry_size));
		__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));

		if (tab_type == section)
			return (struct uni_table_desc *) &unirom[offs];
	}

	return NULL;
}

#define QLCNIC_FILEHEADER_SIZE (14 * 4)

static int
netxen_nic_validate_header(struct netxen_adapter *adapter)
{
	const u8 *unirom = adapter->fw->data;
	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
	u32 fw_file_size = adapter->fw->size;
	u32 tab_size;
	__le32 entries;
	__le32 entry_size;

	if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
		return -EINVAL;

	entries = cpu_to_le32(directory->num_entries);
	entry_size = cpu_to_le32(directory->entry_size);
	tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);

	if (fw_file_size < tab_size)
		return -EINVAL;

	return 0;
}

static int
netxen_nic_validate_bootld(struct netxen_adapter *adapter)
{
	struct uni_table_desc *tab_desc;
	struct uni_data_desc *descr;
	const u8 *unirom = adapter->fw->data;
	__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
				NX_UNI_BOOTLD_IDX_OFF));
	u32 offs;
	u32 tab_size;
	u32 data_size;

	tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);

	if (!tab_desc)
		return -EINVAL;

	tab_size = cpu_to_le32(tab_desc->findex) +
			(cpu_to_le32(tab_desc->entry_size) * (idx + 1));

	if (adapter->fw->size < tab_size)
		return -EINVAL;

	offs = cpu_to_le32(tab_desc->findex) +
		(cpu_to_le32(tab_desc->entry_size) * (idx));
	descr = (struct uni_data_desc *)&unirom[offs];

	data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);

	if (adapter->fw->size < data_size)
		return -EINVAL;

	return 0;
}

static int
netxen_nic_validate_fw(struct netxen_adapter *adapter)
{
	struct uni_table_desc *tab_desc;
	struct uni_data_desc *descr;
	const u8 *unirom = adapter->fw->data;
	__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
				NX_UNI_FIRMWARE_IDX_OFF));
	u32 offs;
	u32 tab_size;
	u32 data_size;

	tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);

	if (!tab_desc)
		return -EINVAL;

	tab_size = cpu_to_le32(tab_desc->findex) +
			(cpu_to_le32(tab_desc->entry_size) * (idx + 1));

	if (adapter->fw->size < tab_size)
		return -EINVAL;

	offs = cpu_to_le32(tab_desc->findex) +
		(cpu_to_le32(tab_desc->entry_size) * (idx));
	descr = (struct uni_data_desc *)&unirom[offs];
	data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);

	if (adapter->fw->size < data_size)
		return -EINVAL;

	return 0;
}


static int
netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
{
	struct uni_table_desc *ptab_descr;
	const u8 *unirom = adapter->fw->data;
	int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
			1 : netxen_p3_has_mn(adapter);
	__le32 entries;
	__le32 entry_size;
	u32 tab_size;
	u32 i;

	ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
	if (ptab_descr == NULL)
		return -EINVAL;

	entries = cpu_to_le32(ptab_descr->num_entries);
	entry_size = cpu_to_le32(ptab_descr->entry_size);
	tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);

	if (adapter->fw->size < tab_size)
		return -EINVAL;

nomn:
	for (i = 0; i < entries; i++) {

		__le32 flags, file_chiprev, offs;
		u8 chiprev = adapter->ahw.revision_id;
		uint32_t flagbit;

		offs = cpu_to_le32(ptab_descr->findex) +
				(i * cpu_to_le32(ptab_descr->entry_size));
		flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
		file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
					NX_UNI_CHIP_REV_OFF));

		flagbit = mn_present ? 1 : 2;

		if ((chiprev == file_chiprev) &&
			((1ULL << flagbit) & flags)) {
			adapter->file_prd_off = offs;
			return 0;
		}
	}

	if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		mn_present = 0;
		goto nomn;
	}

	return -EINVAL;
}

static int
netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
{
	if (netxen_nic_validate_header(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: header validation failed\n");
		return -EINVAL;
	}

	if (netxen_nic_validate_product_offs(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: product validation failed\n");
		return -EINVAL;
	}

	if (netxen_nic_validate_bootld(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: bootld validation failed\n");
		return -EINVAL;
	}

	if (netxen_nic_validate_fw(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: firmware validation failed\n");
		return -EINVAL;
	}

	return 0;
}

static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
			u32 section, u32 idx_offset)
{
	const u8 *unirom = adapter->fw->data;
	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
								idx_offset));
	struct uni_table_desc *tab_desc;
	__le32 offs;

	tab_desc = nx_get_table_desc(unirom, section);

	if (tab_desc == NULL)
		return NULL;

	offs = cpu_to_le32(tab_desc->findex) +
		(cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct uni_data_desc *)&unirom[offs];
}

static u8 *
nx_get_bootld_offs(struct netxen_adapter *adapter)
{
	u32 offs = NETXEN_BOOTLD_START;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
		offs = cpu_to_le32((nx_get_data_desc(adapter,
					NX_UNI_DIR_SECT_BOOTLD,
					NX_UNI_BOOTLD_IDX_OFF))->findex);

	return (u8 *)&adapter->fw->data[offs];
}

static u8 *
nx_get_fw_offs(struct netxen_adapter *adapter)
{
	u32 offs = NETXEN_IMAGE_START;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
		offs = cpu_to_le32((nx_get_data_desc(adapter,
					NX_UNI_DIR_SECT_FW,
					NX_UNI_FIRMWARE_IDX_OFF))->findex);

	return (u8 *)&adapter->fw->data[offs];
}

static __le32
nx_get_fw_size(struct netxen_adapter *adapter)
{
	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
		return cpu_to_le32((nx_get_data_desc(adapter,
					NX_UNI_DIR_SECT_FW,
					NX_UNI_FIRMWARE_IDX_OFF))->size);
	else
		return cpu_to_le32(
			*(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
}

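/*
 * Extract the firmware version from the image: for a unified ROM image it
 * is parsed from the "REV=x.y.z" string near the end of the firmware
 * section, otherwise it is read directly from NX_FW_VERSION_OFFSET.
 */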
static __le32
nx_get_fw_version(struct netxen_adapter *adapter)
{
	struct uni_data_desc *fw_data_desc;
	const struct firmware *fw = adapter->fw;
	__le32 major, minor, sub;
	const u8 *ver_str;
	int i, ret = 0;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {

		fw_data_desc = nx_get_data_desc(adapter,
				NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
				cpu_to_le32(fw_data_desc->size) - 17;

		for (i = 0; i < 12; i++) {
			if (!strncmp(&ver_str[i], "REV=", 4)) {
				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
						&major, &minor, &sub);
				break;
			}
		}

		if (ret != 3)
			return 0;

		return major + (minor << 8) + (sub << 16);

	} else
		return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
}

static __le32
nx_get_bios_version(struct netxen_adapter *adapter)
{
	const struct firmware *fw = adapter->fw;
	__le32 bios_ver, prd_off = adapter->file_prd_off;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
						+ NX_UNI_BIOS_VERSION_OFF));
		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
							(bios_ver >> 24);
	} else
		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);

}

int
netxen_need_fw_reset(struct netxen_adapter *adapter)
{
	u32 count, old_count;
	u32 val, version, major, minor, build;
	int i, timeout;
	u8 fw_type;

	/* NX2031 firmware doesn't support heartbeat */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 1;

	if (adapter->need_fw_reset)
		return 1;

	/* last attempt had failed */
	if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
		return 1;

	old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);

	for (i = 0; i < 10; i++) {

		timeout = msleep_interruptible(200);
		if (timeout) {
			NXWR32(adapter, CRB_CMDPEG_STATE,
					PHAN_INITIALIZE_FAILED);
			return -EINTR;
		}

		count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
		if (count != old_count)
			break;
	}

	/* firmware is dead */
	if (count == old_count)
		return 1;

	/* check if we have got newer or different file firmware */
	if (adapter->fw) {

		val = nx_get_fw_version(adapter);

		version = NETXEN_DECODE_VERSION(val);

		major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
		minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
		build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);

		if (version > NETXEN_VERSION_CODE(major, minor, build))
			return 1;

		if (version == NETXEN_VERSION_CODE(major, minor, build) &&
			adapter->fw_type != NX_UNIFIED_ROMIMAGE) {

			val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
			fw_type = (val & 0x4) ?
				NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;

			if (adapter->fw_type != fw_type)
				return 1;
		}
	}

	return 0;
}

#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)

int
netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
{
	u32 flash_fw_ver, min_fw_ver;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 0;

	if (netxen_rom_fast_read(adapter,
			NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
		dev_err(&adapter->pdev->dev, "Unable to read flash fw "
			"version\n");
		return -EIO;
	}

	flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
	min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
	if (flash_fw_ver >= min_fw_ver)
		return 0;

	dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported "
		"[4.0.505]. Please update firmware on flash\n",
		_major(flash_fw_ver), _minor(flash_fw_ver),
		_build(flash_fw_ver));
	return -EINVAL;
}

static char *fw_name[] = {
	NX_P2_MN_ROMIMAGE_NAME,
	NX_P3_CT_ROMIMAGE_NAME,
	NX_P3_MN_ROMIMAGE_NAME,
	NX_UNIFIED_ROMIMAGE_NAME,
	NX_FLASH_ROMIMAGE_NAME,
};

int
netxen_load_firmware(struct netxen_adapter *adapter)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	const struct firmware *fw = adapter->fw;
	struct pci_dev *pdev = adapter->pdev;

	dev_info(&pdev->dev, "loading firmware from %s\n",
			fw_name[adapter->fw_type]);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);

	if (fw) {
		__le64 data;

		size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;

		ptr64 = (u64 *)nx_get_bootld_offs(adapter);
		flashaddr = NETXEN_BOOTLD_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (adapter->pci_mem_write(adapter, flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}

		size = (__force u32)nx_get_fw_size(adapter) / 8;

		ptr64 = (u64 *)nx_get_fw_offs(adapter);
		flashaddr = NETXEN_IMAGE_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (adapter->pci_mem_write(adapter,
						flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}

		size = (__force u32)nx_get_fw_size(adapter) % 8;
		if (size) {
			data = cpu_to_le64(ptr64[i]);

			if (adapter->pci_mem_write(adapter,
						flashaddr, data))
				return -EIO;
		}

	} else {
		u64 data;
		u32 hi, lo;

		size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
		flashaddr = NETXEN_BOOTLD_START;

		for (i = 0; i < size; i++) {
			if (netxen_rom_fast_read(adapter,
					flashaddr, (int *)&lo) != 0)
				return -EIO;
			if (netxen_rom_fast_read(adapter,
					flashaddr + 4, (int *)&hi) != 0)
				return -EIO;

			/* hi, lo are already in host endian byteorder */
			data = (((u64)hi << 32) | lo);

			if (adapter->pci_mem_write(adapter,
					flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}
	}
	msleep(1);

	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
		NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
	} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
	else {
		NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
		NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
	}

	return 0;
}

static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
	__le32 val;
	__le32 flash_fw_ver;
	u32 file_fw_ver, min_ver, bios;
	struct pci_dev *pdev = adapter->pdev;
	const struct firmware *fw = adapter->fw;
	u8 fw_type = adapter->fw_type;
	u32 crbinit_fix_fw;

	if (fw_type == NX_UNIFIED_ROMIMAGE) {
		if (netxen_nic_validate_unified_romimage(adapter))
			return -EINVAL;
	} else {
		val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
		if ((__force u32)val != NETXEN_BDINFO_MAGIC)
			return -EINVAL;

		if (fw->size < NX_FW_MIN_SIZE)
			return -EINVAL;
	}

	val = nx_get_fw_version(adapter);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		min_ver = NETXEN_MIN_P3_FW_SUPP;
	else
		min_ver = NETXEN_VERSION_CODE(3, 4, 216);

	file_fw_ver = NETXEN_DECODE_VERSION(val);

	if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
	    (file_fw_ver < min_ver)) {
		dev_err(&pdev->dev,
			"%s: firmware version %d.%d.%d unsupported\n",
			fw_name[fw_type], _major(file_fw_ver),
			_minor(file_fw_ver), _build(file_fw_ver));
		return -EINVAL;
	}
	val = nx_get_bios_version(adapter);
	netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
	if ((__force u32)val != bios) {
		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
				fw_name[fw_type]);
		return -EINVAL;
	}

	if (netxen_rom_fast_read(adapter,
			NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
		dev_err(&pdev->dev, "Unable to read flash fw version\n");
		return -EIO;
	}
	flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);

	/* New fw from file is not allowed, if fw on flash is < 4.0.554 */
	crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
	if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
	    NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		dev_err(&pdev->dev, "Incompatibility detected between driver "
			"and firmware version on flash. This configuration "
			"is not recommended. Please update the firmware on "
			"flash immediately\n");
		return -EINVAL;
	}

	/* check if flashed firmware is newer only for no-mn and P2 case */
	if (!netxen_p3_has_mn(adapter) ||
	    NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (flash_fw_ver > file_fw_ver) {
			dev_info(&pdev->dev, "%s: firmware is older than flash\n",
				fw_name[fw_type]);
			return -EINVAL;
		}
	}

	NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
	return 0;
}

static void
nx_get_next_fwtype(struct netxen_adapter *adapter)
{
	u8 fw_type;

	switch (adapter->fw_type) {
	case NX_UNKNOWN_ROMIMAGE:
		fw_type = NX_UNIFIED_ROMIMAGE;
		break;

	case NX_UNIFIED_ROMIMAGE:
		if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
			fw_type = NX_FLASH_ROMIMAGE;
		else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			fw_type = NX_P2_MN_ROMIMAGE;
		else if (netxen_p3_has_mn(adapter))
			fw_type = NX_P3_MN_ROMIMAGE;
		else
			fw_type = NX_P3_CT_ROMIMAGE;
		break;

	case NX_P3_MN_ROMIMAGE:
		fw_type = NX_P3_CT_ROMIMAGE;
		break;

	case NX_P2_MN_ROMIMAGE:
	case NX_P3_CT_ROMIMAGE:
	default:
		fw_type = NX_FLASH_ROMIMAGE;
		break;
	}

	adapter->fw_type = fw_type;
}

static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
	u32 capability, flashed_ver;
	capability = 0;

	/* NX2031 always had MN */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 1;

	netxen_rom_fast_read(adapter,
			NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
	flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);

	if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {

		capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
		if (capability & NX_PEG_TUNE_MN_PRESENT)
			return 1;
	}
	return 0;
}

void netxen_request_firmware(struct netxen_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int rc = 0;

	adapter->fw_type = NX_UNKNOWN_ROMIMAGE;

next:
	nx_get_next_fwtype(adapter);

	if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
		adapter->fw = NULL;
	} else {
		rc = request_firmware(&adapter->fw,
				fw_name[adapter->fw_type], &pdev->dev);
		if (rc != 0)
			goto next;

		rc = netxen_validate_firmware(adapter);
		if (rc != 0) {
			release_firmware(adapter->fw);
			msleep(1);
			goto next;
		}
	}
}


void
netxen_release_firmware(struct netxen_adapter *adapter)
{
	release_firmware(adapter->fw);
	adapter->fw = NULL;
}

int netxen_init_dummy_dma(struct netxen_adapter *adapter)
{
	u64 addr;
	u32 hi, lo;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 0;

	adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
				 NETXEN_HOST_DUMMY_DMA_SIZE,
				 &adapter->dummy_dma.phys_addr);
	if (adapter->dummy_dma.addr == NULL) {
		dev_err(&adapter->pdev->dev,
			"ERROR: Could not allocate dummy DMA memory\n");
		return -ENOMEM;
	}

	addr = (uint64_t) adapter->dummy_dma.phys_addr;
	hi = (addr >> 32) & 0xffffffff;
	lo = addr & 0xffffffff;

	NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
	NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);

	return 0;
}

/*
 * NetXen DMA watchdog control:
 *
 * Bit 0	: enabled	=> R/O: 1 watchdog active, 0 inactive
 * Bit 1	: disable_request => 1 req disable dma watchdog
 * Bit 2	: enable_request  => 1 req enable dma watchdog
 * Bit 3-31	: unused
 */
void netxen_free_dummy_dma(struct netxen_adapter *adapter)
{
	int i = 100;
	u32 ctrl;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return;

	if (!adapter->dummy_dma.addr)
		return;

	ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
	if ((ctrl & 0x1) != 0) {
		NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));

		while ((ctrl & 0x1) != 0) {

			msleep(50);

			ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);

			if (--i == 0)
				break;
		}
	}

	if (i) {
		pci_free_consistent(adapter->pdev,
			    NETXEN_HOST_DUMMY_DMA_SIZE,
			    adapter->dummy_dma.addr,
			    adapter->dummy_dma.phys_addr);
		adapter->dummy_dma.addr = NULL;
	} else
		dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
}

int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (pegtune_val)
		return 0;

	do {
		val = NXRD32(adapter, CRB_CMDPEG_STATE);
		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return 0;
		case PHAN_INITIALIZE_FAILED:
			goto out_err;
		default:
			break;
		}

		msleep(500);

	} while (--retries);

	NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);

out_err:
	dev_warn(&adapter->pdev->dev, "firmware init failed\n");
	return -EIO;
}

static int
netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
	u32 val = 0;
	int retries = 2000;

	do {
		val = NXRD32(adapter, CRB_RCVPEG_STATE);

		if (val == PHAN_PEG_RCV_INITIALIZED)
			return 0;

		msleep(10);

	} while (--retries);

	if (!retries) {
		printk(KERN_ERR "Receive Peg initialization not "
			      "complete, state: 0x%x.\n", val);
		return -EIO;
	}

	return 0;
}

int netxen_init_firmware(struct netxen_adapter *adapter)
{
	int err;

	err = netxen_receive_peg_ready(adapter);
	if (err)
		return err;

	NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);

	return err;
}

static void
netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
{
	u32 cable_OUI;
	u16 cable_len;
	u16 link_speed;
	u8 link_status, module, duplex, autoneg;
	struct net_device *netdev = adapter->netdev;

	adapter->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
		printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
				netdev->name, cable_OUI, cable_len);
	} else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
		printk(KERN_INFO "%s: unsupported cable length %d\n",
				netdev->name, cable_len);
	}

	/* update link parameters */
	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->link_duplex = DUPLEX_FULL;
	else
		adapter->link_duplex = DUPLEX_HALF;
	adapter->module_type = module;
	adapter->link_autoneg = autoneg;
	adapter->link_speed = link_speed;

	netxen_advert_link_change(adapter, link_status);
}

static void
netxen_handle_fw_message(int desc_cnt, int index,
		struct nx_host_sds_ring *sds_ring)
{
	nx_fw_msg_t msg;
	struct status_desc *desc;
	int i = 0, opcode;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	opcode = netxen_get_nic_msg_opcode(msg.body[0]);
	switch (opcode) {
	case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		netxen_handle_linkevent(sds_ring->adapter, &msg);
		break;
	default:
		break;
	}
}

static int
netxen_alloc_rx_skb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring,
		struct netxen_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!buffer->skb)
		return 1;

	skb = buffer->skb;

	if (!adapter->ahw.cut_through)
		skb_reserve(skb, 2);

	dma = pci_map_single(pdev, skb->data,
			rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb_any(skb);
		buffer->skb = NULL;
		return 1;
	}

	buffer->skb = skb;
	buffer->dma = dma;
	buffer->state = NETXEN_BUFFER_BUSY;

	return 0;
}

static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
	    && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	buffer->skb = NULL;
no_skb:
	buffer->state = NETXEN_BUFFER_FREE;
	return skb;
}

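/*
 * Handle a regular receive descriptor: unmap and detach the indicated rx
 * buffer, push the skb up through GRO, and return the buffer so the caller
 * can recycle it onto the free list.
 */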
static struct netxen_rx_buffer *
netxen_process_rcv(struct netxen_adapter *adapter,
		struct nx_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	struct nx_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = netxen_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = netxen_get_sts_totallength(sts_data0);
	cksum = netxen_get_sts_status(sts_data0);
	pkt_offset = netxen_get_sts_pkt_offset(sts_data0);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);


	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	skb->protocol = eth_type_trans(skb, netdev);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define TCP_HDR_SIZE		20
#define TCP_TS_OPTION_SIZE	12
#define TCP_TS_HDR_SIZE		(TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)

static struct netxen_rx_buffer *
netxen_process_lro(struct netxen_adapter *adapter,
		struct nx_host_sds_ring *sds_ring,
		int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	struct nx_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct tcphdr *th;
	bool push, timestamp;
	int l2_hdr_offset, l4_hdr_offset;
	int index;
	u16 lro_length, length, data_offset;
	u32 seq_number;
	u8 vhdr_len = 0;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = netxen_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = netxen_get_lro_sts_timestamp(sts_data0);
	lro_length = netxen_get_lro_sts_length(sts_data0);
	l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
	push = netxen_get_lro_sts_push_flag(sts_data0);
	seq_number = netxen_get_lro_sts_seq_number(sts_data1);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (timestamp)
		data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);

	skb_pull(skb, l2_hdr_offset);
	skb->protocol = eth_type_trans(skb, netdev);

	if (skb->protocol == htons(ETH_P_8021Q))
		vhdr_len = VLAN_HLEN;
	iph = (struct iphdr *)(skb->data + vhdr_len);
	th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));

	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
	csum_replace2(&iph->check, iph->tot_len, htons(length));
	iph->tot_len = htons(length);
	th->psh = push;
	th->seq = htonl(seq_number);

	length = skb->len;

	if (adapter->flags & NETXEN_FW_MSS_CAP)
		skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define netxen_merge_rx_buffers(list, head) \
	do { list_splice_tail_init(list, head); } while (0);

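/*
 * Consume up to @max status descriptors, dispatching normal, LRO and
 * firmware-message descriptors, then refill and repost the RX rings and
 * advance the status ring consumer index.
 */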
int
netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
{
	struct netxen_adapter *adapter = sds_ring->adapter;

	struct list_head *cur;

	struct status_desc *desc;
	struct netxen_rx_buffer *rxbuf;

	u32 consumer = sds_ring->consumer;

	int count = 0;
	u64 sts_data0, sts_data1;
	int opcode, ring = 0, desc_cnt;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = netxen_get_sts_desc_cnt(sts_data0);

		opcode = netxen_get_sts_opcode(sts_data0);

		switch (opcode) {
		case NETXEN_NIC_RXPKT_DESC:
		case NETXEN_OLD_RXPKT_DESC:
		case NETXEN_NIC_SYN_OFFLOAD:
			ring = netxen_get_sts_type(sts_data0);
			rxbuf = netxen_process_rcv(adapter, sds_ring,
					ring, sts_data0);
			break;
		case NETXEN_NIC_LRO_DESC:
			ring = netxen_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = netxen_process_lro(adapter, sds_ring,
					ring, sts_data0, sts_data1);
			break;
		case NETXEN_NIC_RESPONSE_DESC:
			netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (rxbuf)
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);

skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] =
				cpu_to_le64(STATUS_OWNER_PHANTOM);
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		struct nx_host_rds_ring *rds_ring =
			&adapter->recv_ctx.rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur,
						struct netxen_rx_buffer, list);
				netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			netxen_merge_rx_buffers(&sds_ring->free_list[ring],
						&rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		netxen_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
	}

	return count;
}

/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct netxen_skb_frag *frag;
	int done = 0;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;

	if (!spin_trylock_bh(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;	/* Get the next frag */
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	tx_ring->sw_consumer = sw_consumer;

	if (count && netif_running(netdev)) {
		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_wake_queue(netdev);
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock_bh(&adapter->tx_clean_lock);

	return done;
}

void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
	struct nx_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int producer, count = 0;
	netxen_ctx_msg msg = 0;
	struct list_head *head;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);

		if (!buffer->skb) {
			if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		NXWRIO(adapter, rds_ring->crb_rcv_producer,
				(producer-1) & (rds_ring->num_desc-1));

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			/*
			 * Write a doorbell msg to tell phanmon of change in
			 * receive ring producer
			 * Only for firmware version < 4.0.0
			 */
			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
			netxen_set_msg_privid(msg);
			netxen_set_msg_count(msg,
					((producer - 1) &
					(rds_ring->num_desc - 1)));
			netxen_set_msg_ctxid(msg, adapter->portnum);
			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
			NXWRIO(adapter, DB_NORMALIZE(adapter,
					NETXEN_RCV_PRODUCER_OFFSET), msg);
		}
	}
}

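/*
 * Same as netxen_post_rx_buffers() but without the P2 doorbell message;
 * called from the RX path, so the ring lock is only taken with trylock.
 */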
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int producer, count = 0;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);

		if (!buffer->skb) {
			if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		NXWRIO(adapter, rds_ring->crb_rcv_producer,
				(producer - 1) & (rds_ring->num_desc - 1));
	}
	spin_unlock(&rds_ring->lock);
}

void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}