/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"

struct crb_addr_pair {
	u32 addr;
	u32 data;
};

#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)

#define crb_addr_transform(name) \
	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
	NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20

#define NETXEN_NIC_XDMA_RESET 0x8000ff

static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring);
static int netxen_p3_has_mn(struct netxen_adapter *adapter);

static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}
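
/*
 * crb_addr_xform[] maps each CRB region index (NETXEN_HW_PX_MAP_CRB_*) to the
 * internal hub-agent base of that block (agent address << 20).
 * netxen_decode_crb_addr() further down performs the reverse lookup: given an
 * internal Phantom CRB address from the flash crb_init list, it finds the
 * matching region and rebuilds the address in host PCI CRB space.
 */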

void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int i, ring;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		for (i = 0; i < rds_ring->num_desc; ++i) {
			rx_buf = &(rds_ring->rx_buf_arr[i]);
			if (rx_buf->state == NETXEN_BUFFER_FREE)
				continue;
			pci_unmap_single(adapter->pdev,
					rx_buf->dma,
					rds_ring->dma_size,
					PCI_DMA_FROMDEVICE);
			if (rx_buf->skb != NULL)
				dev_kfree_skb_any(rx_buf->skb);
		}
	}
}

void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_cmd_buffer *cmd_buf;
	struct netxen_skb_frag *buffrag;
	int i, j;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;

	spin_lock_bh(&adapter->tx_clean_lock);
	cmd_buf = tx_ring->cmd_buf_arr;
	for (i = 0; i < tx_ring->num_desc; i++) {
		buffrag = cmd_buf->frag_array;
		if (buffrag->dma) {
			pci_unmap_single(adapter->pdev, buffrag->dma,
					 buffrag->length, PCI_DMA_TODEVICE);
			buffrag->dma = 0ULL;
		}
		for (j = 1; j < cmd_buf->frag_count; j++) {
			buffrag++;
			if (buffrag->dma) {
				pci_unmap_page(adapter->pdev, buffrag->dma,
					       buffrag->length,
					       PCI_DMA_TODEVICE);
				buffrag->dma = 0ULL;
			}
		}
		if (cmd_buf->skb) {
			dev_kfree_skb_any(cmd_buf->skb);
			cmd_buf->skb = NULL;
		}
		cmd_buf++;
	}
	spin_unlock_bh(&adapter->tx_clean_lock);
}

void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->rds_rings == NULL)
		goto skip_rds;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		vfree(rds_ring->rx_buf_arr);
		rds_ring->rx_buf_arr = NULL;
	}
	kfree(recv_ctx->rds_rings);

skip_rds:
	if (adapter->tx_ring == NULL)
		return;

	tx_ring = adapter->tx_ring;
	vfree(tx_ring->cmd_buf_arr);
	kfree(tx_ring);
	adapter->tx_ring = NULL;
}
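
/*
 * Allocate the driver-side (software) ring state: one tx ring with its
 * command buffer array, one netxen_rx_buffer array per RDS ring (normal,
 * jumbo and, where used, LRO), and the per-SDS-ring free lists used to
 * recycle receive buffers.  On any allocation failure everything already
 * allocated is torn down through netxen_free_sw_resources().
 */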
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	struct netxen_rx_buffer *rx_buf;
	int ring, i;

	struct netxen_cmd_buffer *cmd_buf_arr;
	struct net_device *netdev = adapter->netdev;

	tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
	if (tx_ring == NULL)
		return -ENOMEM;

	adapter->tx_ring = tx_ring;

	tx_ring->num_desc = adapter->num_txd;
	tx_ring->txq = netdev_get_tx_queue(netdev, 0);

	cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
	if (cmd_buf_arr == NULL)
		goto err_out;

	tx_ring->cmd_buf_arr = cmd_buf_arr;

	recv_ctx = &adapter->recv_ctx;

	rds_ring = kcalloc(adapter->max_rds_rings,
			   sizeof(struct nx_host_rds_ring), GFP_KERNEL);
	if (rds_ring == NULL)
		goto err_out;

	recv_ctx->rds_rings = rds_ring;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		switch (ring) {
		case RCV_RING_NORMAL:
			rds_ring->num_desc = adapter->num_rxd;
			if (adapter->ahw.cut_through) {
				rds_ring->dma_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
				rds_ring->skb_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
			} else {
				if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
					rds_ring->dma_size =
						NX_P3_RX_BUF_MAX_LEN;
				else
					rds_ring->dma_size =
						NX_P2_RX_BUF_MAX_LEN;
				rds_ring->skb_size =
					rds_ring->dma_size + NET_IP_ALIGN;
			}
			break;

		case RCV_RING_JUMBO:
			rds_ring->num_desc = adapter->num_jumbo_rxd;
			if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
				rds_ring->dma_size =
					NX_P3_RX_JUMBO_BUF_MAX_LEN;
			else
				rds_ring->dma_size =
					NX_P2_RX_JUMBO_BUF_MAX_LEN;

			if (adapter->capabilities & NX_CAP0_HW_LRO)
				rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;

			rds_ring->skb_size =
				rds_ring->dma_size + NET_IP_ALIGN;
			break;

		case RCV_RING_LRO:
			rds_ring->num_desc = adapter->num_lro_rxd;
			rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
			rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
			break;

		}
		rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
		if (rds_ring->rx_buf_arr == NULL)
			/* free whatever was already allocated */
			goto err_out;

		INIT_LIST_HEAD(&rds_ring->free_list);
		/*
		 * Now go through all of them, set reference handles
		 * and put them in the queues.
		 */
		rx_buf = rds_ring->rx_buf_arr;
		for (i = 0; i < rds_ring->num_desc; i++) {
			list_add_tail(&rx_buf->list,
					&rds_ring->free_list);
			rx_buf->ref_handle = i;
			rx_buf->state = NETXEN_BUFFER_FREE;
			rx_buf++;
		}
		spin_lock_init(&rds_ring->lock);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sds_ring->irq = adapter->msix_entries[ring].vector;
		sds_ring->adapter = adapter;
		sds_ring->num_desc = adapter->num_rxd;

		for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
			INIT_LIST_HEAD(&sds_ring->free_list[i]);
	}

	return 0;

err_out:
	netxen_free_sw_resources(adapter);
	return -ENOMEM;
}

/*
 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
 * address to external PCI CRB address.
 */
static u32 netxen_decode_crb_addr(u32 addr)
{
	int i;
	u32 base_addr, offset, pci_base;

	crb_addr_transform_setup();

	pci_base = NETXEN_ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == NETXEN_ADDR_ERROR)
		return pci_base;
	else
		return pci_base + offset;
}

#define NETXEN_MAX_ROM_WAIT_USEC	100

static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
	long timeout = 0;
	long done = 0;

	cond_resched();

	while (done == 0) {
		done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
		done &= 2;
		if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
			dev_err(&adapter->pdev->dev,
				"Timeout reached waiting for rom done\n");
			return -EIO;
		}
		udelay(1);
	}
	return 0;
}

static int do_rom_fast_read(struct netxen_adapter *adapter,
			    int addr, int *valp)
{
	NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (netxen_wait_rom_done(adapter)) {
		printk("Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(10);
	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
	return 0;
}

static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
				  u8 *bytes, size_t size)
{
	int addridx;
	int ret = 0;

	for (addridx = addr; addridx < (addr + size); addridx += 4) {
		int v;
		ret = do_rom_fast_read(adapter, addridx, &v);
		if (ret != 0)
			break;
		*(__le32 *)bytes = cpu_to_le32(v);
		bytes += 4;
	}

	return ret;
}

int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
			   u8 *bytes, size_t size)
{
	int ret;

	ret = netxen_rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = do_rom_fast_read_words(adapter, addr, bytes, size);

	netxen_rom_unlock(adapter);
	return ret;
}

int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
	int ret;

	if (netxen_rom_lock(adapter) != 0)
		return -EIO;

	ret = do_rom_fast_read(adapter, addr, valp);
	netxen_rom_unlock(adapter);
	return ret;
}

#define NETXEN_BOARDTYPE	0x4008
#define NETXEN_BOARDNUM		0x400c
#define NETXEN_CHIPNUM		0x4010
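
/*
 * Replay the crb_init list stored in flash: pull the global software reset,
 * read the entry count and the (addr, data) pairs from the ROM, translate
 * each address with netxen_decode_crb_addr() and write the value out,
 * skipping registers that must not be reset from the host (PCI setup,
 * clock controls, the cold-reboot magic, etc.).  Writes to the global
 * software reset register are followed by a long delay so the CRB window
 * can settle.
 */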
int netxen_pinit_from_rom(struct netxen_adapter *adapter)
{
	int addr, val;
	int i, n, init_delay = 0;
	struct crb_addr_pair *buf;
	unsigned offset;
	u32 off;

	/* resetall */
	netxen_rom_lock(adapter);
	NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	netxen_rom_unlock(adapter);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			(n != 0xcafecafe) ||
			netxen_rom_fast_read(adapter, 4, &n) != 0) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = n & 0xffffU;
		n = (n >> 16) & 0xffffU;
	} else {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			!(n & 0x80000000)) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = 1;
		n &= ~0x80000000;
	}

	if (n >= 1024) {
		printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
				" initialized.\n", __func__, n);
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;

	}

	for (i = 0; i < n; i++) {

		off = netxen_decode_crb_addr(buf[i].addr);
		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "CRB init value out of range %x\n",
					buf[i].addr);
			continue;
		}
		off += NETXEN_PCI_CRBSPACE;

		if (off & 1)
			continue;

		/* skipping cold reboot MAGIC */
		if (off == NETXEN_CAM_RAM(0x1fc))
			continue;

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
			if (off == (NETXEN_CRB_I2C0 + 0x1c))
				continue;
			/* do not reset PCI */
			if (off == (ROMUSB_GLB + 0xbc))
				continue;
			if (off == (ROMUSB_GLB + 0xa8))
				continue;
			if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
				continue;
			if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
				continue;
			if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
				continue;
			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
				!NX_IS_REVISION_P3P(adapter->ahw.revision_id))
				buf[i].data = 0x1020;
			/* skip the function enable register */
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
				continue;
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
				continue;
		}

		init_delay = 1;
		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
			init_delay = 1000;
			if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
				/* hold xdma in reset also */
				buf[i].data = NETXEN_NIC_XDMA_RESET;
				buf[i].data = 0x8000ff;
			}
		}

		NXWR32(adapter, off, buf[i].data);

		msleep(init_delay);
	}
	kfree(buf);

	/* disable_peg_cache_all */

	/* unreset_net_cache */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
	}

	/* p2dn replyCount */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
	/* disable_peg_cache 1 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr_all */

	/* peg_clr 0 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
	/* peg_clr 1 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
	/* peg_clr 2 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
	/* peg_clr 3 */
	NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
	NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
	return 0;
}
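
/*
 * Unified ROM image handling.  A unified firmware file starts with a
 * directory (struct uni_table_desc) whose entries describe sections such as
 * the product table, the bootloader and the firmware proper.  The helpers
 * below look up a section by type, and the validate_* routines bounds-check
 * every table and data descriptor against the file size before anything is
 * read out of it.
 */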

static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
{
	uint32_t i;
	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {

		__le32 offs = cpu_to_le32(directory->findex) +
				(i * cpu_to_le32(directory->entry_size));
		__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));

		if (tab_type == section)
			return (struct uni_table_desc *) &unirom[offs];
	}

	return NULL;
}

#define	QLCNIC_FILEHEADER_SIZE	(14 * 4)

static int
netxen_nic_validate_header(struct netxen_adapter *adapter)
{
	const u8 *unirom = adapter->fw->data;
	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
	u32 fw_file_size = adapter->fw->size;
	u32 tab_size;
	__le32 entries;
	__le32 entry_size;

	if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
		return -EINVAL;

	entries = cpu_to_le32(directory->num_entries);
	entry_size = cpu_to_le32(directory->entry_size);
	tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);

	if (fw_file_size < tab_size)
		return -EINVAL;

	return 0;
}

static int
netxen_nic_validate_bootld(struct netxen_adapter *adapter)
{
	struct uni_table_desc *tab_desc;
	struct uni_data_desc *descr;
	const u8 *unirom = adapter->fw->data;
	__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
				NX_UNI_BOOTLD_IDX_OFF));
	u32 offs;
	u32 tab_size;
	u32 data_size;

	tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);

	if (!tab_desc)
		return -EINVAL;

	tab_size = cpu_to_le32(tab_desc->findex) +
			(cpu_to_le32(tab_desc->entry_size) * (idx + 1));

	if (adapter->fw->size < tab_size)
		return -EINVAL;

	offs = cpu_to_le32(tab_desc->findex) +
		(cpu_to_le32(tab_desc->entry_size) * (idx));
	descr = (struct uni_data_desc *)&unirom[offs];

	data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);

	if (adapter->fw->size < data_size)
		return -EINVAL;

	return 0;
}

static int
netxen_nic_validate_fw(struct netxen_adapter *adapter)
{
	struct uni_table_desc *tab_desc;
	struct uni_data_desc *descr;
	const u8 *unirom = adapter->fw->data;
	__le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
				NX_UNI_FIRMWARE_IDX_OFF));
	u32 offs;
	u32 tab_size;
	u32 data_size;

	tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);

	if (!tab_desc)
		return -EINVAL;

	tab_size = cpu_to_le32(tab_desc->findex) +
			(cpu_to_le32(tab_desc->entry_size) * (idx + 1));

	if (adapter->fw->size < tab_size)
		return -EINVAL;

	offs = cpu_to_le32(tab_desc->findex) +
		(cpu_to_le32(tab_desc->entry_size) * (idx));
	descr = (struct uni_data_desc *)&unirom[offs];
	data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);

	if (adapter->fw->size < data_size)
		return -EINVAL;

	return 0;
}
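
/*
 * Pick the product-table entry that matches this adapter: the chip revision
 * must match and the entry's flag word must advertise the right memory
 * configuration (flag bit 1 when MN is present, bit 2 otherwise).  For P3
 * parts with MN the search is retried without the MN requirement if no
 * entry matched.
 */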
static int
netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
{
	struct uni_table_desc *ptab_descr;
	const u8 *unirom = adapter->fw->data;
	int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
			1 : netxen_p3_has_mn(adapter);
	__le32 entries;
	__le32 entry_size;
	u32 tab_size;
	u32 i;

	ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
	if (ptab_descr == NULL)
		return -EINVAL;

	entries = cpu_to_le32(ptab_descr->num_entries);
	entry_size = cpu_to_le32(ptab_descr->entry_size);
	tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);

	if (adapter->fw->size < tab_size)
		return -EINVAL;

nomn:
	for (i = 0; i < entries; i++) {

		__le32 flags, file_chiprev, offs;
		u8 chiprev = adapter->ahw.revision_id;
		uint32_t flagbit;

		offs = cpu_to_le32(ptab_descr->findex) +
			(i * cpu_to_le32(ptab_descr->entry_size));
		flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
		file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
					NX_UNI_CHIP_REV_OFF));

		flagbit = mn_present ? 1 : 2;

		if ((chiprev == file_chiprev) &&
			((1ULL << flagbit) & flags)) {
			adapter->file_prd_off = offs;
			return 0;
		}
	}

	if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		mn_present = 0;
		goto nomn;
	}

	return -EINVAL;
}

static int
netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
{
	if (netxen_nic_validate_header(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: header validation failed\n");
		return -EINVAL;
	}

	if (netxen_nic_validate_product_offs(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: product validation failed\n");
		return -EINVAL;
	}

	if (netxen_nic_validate_bootld(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: bootld validation failed\n");
		return -EINVAL;
	}

	if (netxen_nic_validate_fw(adapter)) {
		dev_err(&adapter->pdev->dev,
				"unified image: firmware validation failed\n");
		return -EINVAL;
	}

	return 0;
}

static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
			u32 section, u32 idx_offset)
{
	const u8 *unirom = adapter->fw->data;
	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
				idx_offset));
	struct uni_table_desc *tab_desc;
	__le32 offs;

	tab_desc = nx_get_table_desc(unirom, section);

	if (tab_desc == NULL)
		return NULL;

	offs = cpu_to_le32(tab_desc->findex) +
		(cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct uni_data_desc *)&unirom[offs];
}

static u8 *
nx_get_bootld_offs(struct netxen_adapter *adapter)
{
	u32 offs = NETXEN_BOOTLD_START;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
		offs = cpu_to_le32((nx_get_data_desc(adapter,
					NX_UNI_DIR_SECT_BOOTLD,
					NX_UNI_BOOTLD_IDX_OFF))->findex);

	return (u8 *)&adapter->fw->data[offs];
}

static u8 *
nx_get_fw_offs(struct netxen_adapter *adapter)
{
	u32 offs = NETXEN_IMAGE_START;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
		offs = cpu_to_le32((nx_get_data_desc(adapter,
					NX_UNI_DIR_SECT_FW,
					NX_UNI_FIRMWARE_IDX_OFF))->findex);

	return (u8 *)&adapter->fw->data[offs];
}

static __le32
nx_get_fw_size(struct netxen_adapter *adapter)
{
	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
		return cpu_to_le32((nx_get_data_desc(adapter,
					NX_UNI_DIR_SECT_FW,
					NX_UNI_FIRMWARE_IDX_OFF))->size);
	else
		return cpu_to_le32(
			*(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
}
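
/*
 * For a unified image the firmware version is recovered from a short text
 * tag stored in the last few bytes of the firmware section, e.g. a string
 * of the form "REV=x.y.z " (the exact tag contents here are illustrative);
 * for flat images the version is read from a fixed offset instead.
 */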
static __le32
nx_get_fw_version(struct netxen_adapter *adapter)
{
	struct uni_data_desc *fw_data_desc;
	const struct firmware *fw = adapter->fw;
	__le32 major, minor, sub;
	const u8 *ver_str;
	int i, ret = 0;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {

		fw_data_desc = nx_get_data_desc(adapter,
				NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
				cpu_to_le32(fw_data_desc->size) - 17;

		for (i = 0; i < 12; i++) {
			if (!strncmp(&ver_str[i], "REV=", 4)) {
				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
					     &major, &minor, &sub);
				break;
			}
		}

		if (ret != 3)
			return 0;

		return major + (minor << 8) + (sub << 16);

	} else
		return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
}

static __le32
nx_get_bios_version(struct netxen_adapter *adapter)
{
	const struct firmware *fw = adapter->fw;
	__le32 bios_ver, prd_off = adapter->file_prd_off;

	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
						+ NX_UNI_BIOS_VERSION_OFF));
		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
							(bios_ver >> 24);
	} else
		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);

}
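
/*
 * Decide whether the firmware has to be (re)loaded: always for NX2031,
 * when a reset was explicitly requested or the previous init failed, when
 * the peg-alive heartbeat counter stops incrementing, or when the firmware
 * file we hold is newer (or of a different type) than what is currently
 * running on the card.
 */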
int
netxen_need_fw_reset(struct netxen_adapter *adapter)
{
	u32 count, old_count;
	u32 val, version, major, minor, build;
	int i, timeout;
	u8 fw_type;

	/* NX2031 firmware doesn't support heartbeat */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 1;

	if (adapter->need_fw_reset)
		return 1;

	/* last attempt had failed */
	if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
		return 1;

	old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);

	for (i = 0; i < 10; i++) {

		timeout = msleep_interruptible(200);
		if (timeout) {
			NXWR32(adapter, CRB_CMDPEG_STATE,
					PHAN_INITIALIZE_FAILED);
			return -EINTR;
		}

		count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
		if (count != old_count)
			break;
	}

	/* firmware is dead */
	if (count == old_count)
		return 1;

	/* check if we have got newer or different file firmware */
	if (adapter->fw) {

		val = nx_get_fw_version(adapter);

		version = NETXEN_DECODE_VERSION(val);

		major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
		minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
		build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);

		if (version > NETXEN_VERSION_CODE(major, minor, build))
			return 1;

		if (version == NETXEN_VERSION_CODE(major, minor, build) &&
			adapter->fw_type != NX_UNIFIED_ROMIMAGE) {

			val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
			fw_type = (val & 0x4) ?
				NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;

			if (adapter->fw_type != fw_type)
				return 1;
		}
	}

	return 0;
}

#define NETXEN_MIN_P3_FW_SUPP	NETXEN_VERSION_CODE(4, 0, 505)

int
netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
{
	u32 flash_fw_ver, min_fw_ver;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 0;

	if (netxen_rom_fast_read(adapter,
			NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
		dev_err(&adapter->pdev->dev, "Unable to read flash fw "
			"version\n");
		return -EIO;
	}

	flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
	min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
	if (flash_fw_ver >= min_fw_ver)
		return 0;

	dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported "
		"[4.0.505]. Please update firmware on flash\n",
		_major(flash_fw_ver), _minor(flash_fw_ver),
		_build(flash_fw_ver));
	return -EINVAL;
}

static char *fw_name[] = {
	NX_P2_MN_ROMIMAGE_NAME,
	NX_P3_CT_ROMIMAGE_NAME,
	NX_P3_MN_ROMIMAGE_NAME,
	NX_UNIFIED_ROMIMAGE_NAME,
	NX_FLASH_ROMIMAGE_NAME,
};

int
netxen_load_firmware(struct netxen_adapter *adapter)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	const struct firmware *fw = adapter->fw;
	struct pci_dev *pdev = adapter->pdev;

	dev_info(&pdev->dev, "loading firmware from %s\n",
			fw_name[adapter->fw_type]);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);

	if (fw) {
		__le64 data;

		size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;

		ptr64 = (u64 *)nx_get_bootld_offs(adapter);
		flashaddr = NETXEN_BOOTLD_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (adapter->pci_mem_write(adapter, flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}

		size = (__force u32)nx_get_fw_size(adapter) / 8;

		ptr64 = (u64 *)nx_get_fw_offs(adapter);
		flashaddr = NETXEN_IMAGE_START;

		for (i = 0; i < size; i++) {
			data = cpu_to_le64(ptr64[i]);

			if (adapter->pci_mem_write(adapter,
						flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}

		size = (__force u32)nx_get_fw_size(adapter) % 8;
		if (size) {
			data = cpu_to_le64(ptr64[i]);

			if (adapter->pci_mem_write(adapter,
						flashaddr, data))
				return -EIO;
		}

	} else {
		u64 data;
		u32 hi, lo;

		size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
		flashaddr = NETXEN_BOOTLD_START;

		for (i = 0; i < size; i++) {
			if (netxen_rom_fast_read(adapter,
					flashaddr, (int *)&lo) != 0)
				return -EIO;
			if (netxen_rom_fast_read(adapter,
					flashaddr + 4, (int *)&hi) != 0)
				return -EIO;

			/* hi, lo are already in host endian byteorder */
			data = (((u64)hi << 32) | lo);

			if (adapter->pci_mem_write(adapter,
					flashaddr, data))
				return -EIO;

			flashaddr += 8;
		}
	}
	msleep(1);

	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
		NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
	} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
	else {
		NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
		NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
	}

	return 0;
}
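
/*
 * Sanity-check a firmware file before it is used: verify the image magic
 * and minimum size (or the unified-image tables), reject versions outside
 * the supported range for the chip, require the file's BIOS version to
 * match the one in flash, refuse a 4.0.554+ file when the flash firmware
 * predates the crbinit fix, and for P2 or no-MN configurations prefer a
 * newer flash image over an older file.
 */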
static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
	__le32 val;
	__le32 flash_fw_ver;
	u32 file_fw_ver, min_ver, bios;
	struct pci_dev *pdev = adapter->pdev;
	const struct firmware *fw = adapter->fw;
	u8 fw_type = adapter->fw_type;
	u32 crbinit_fix_fw;

	if (fw_type == NX_UNIFIED_ROMIMAGE) {
		if (netxen_nic_validate_unified_romimage(adapter))
			return -EINVAL;
	} else {
		val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
		if ((__force u32)val != NETXEN_BDINFO_MAGIC)
			return -EINVAL;

		if (fw->size < NX_FW_MIN_SIZE)
			return -EINVAL;
	}

	val = nx_get_fw_version(adapter);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		min_ver = NETXEN_MIN_P3_FW_SUPP;
	else
		min_ver = NETXEN_VERSION_CODE(3, 4, 216);

	file_fw_ver = NETXEN_DECODE_VERSION(val);

	if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
	    (file_fw_ver < min_ver)) {
		dev_err(&pdev->dev,
			"%s: firmware version %d.%d.%d unsupported\n",
			fw_name[fw_type], _major(file_fw_ver),
			_minor(file_fw_ver), _build(file_fw_ver));
		return -EINVAL;
	}
	val = nx_get_bios_version(adapter);
	if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
		return -EIO;
	if ((__force u32)val != bios) {
		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
				fw_name[fw_type]);
		return -EINVAL;
	}

	if (netxen_rom_fast_read(adapter,
			NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
		dev_err(&pdev->dev, "Unable to read flash fw version\n");
		return -EIO;
	}
	flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);

	/* New fw from file is not allowed, if fw on flash is < 4.0.554 */
	crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
	if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
	    NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		dev_err(&pdev->dev, "Incompatibility detected between driver "
			"and firmware version on flash. This configuration "
			"is not recommended. Please update the firmware on "
			"flash immediately\n");
		return -EINVAL;
	}

	/* check if flashed firmware is newer only for no-mn and P2 case*/
	if (!netxen_p3_has_mn(adapter) ||
	    NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (flash_fw_ver > file_fw_ver) {
			dev_info(&pdev->dev, "%s: firmware is older than flash\n",
				fw_name[fw_type]);
			return -EINVAL;
		}
	}

	NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
	return 0;
}

static void
nx_get_next_fwtype(struct netxen_adapter *adapter)
{
	u8 fw_type;

	switch (adapter->fw_type) {
	case NX_UNKNOWN_ROMIMAGE:
		fw_type = NX_UNIFIED_ROMIMAGE;
		break;

	case NX_UNIFIED_ROMIMAGE:
		if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
			fw_type = NX_FLASH_ROMIMAGE;
		else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			fw_type = NX_P2_MN_ROMIMAGE;
		else if (netxen_p3_has_mn(adapter))
			fw_type = NX_P3_MN_ROMIMAGE;
		else
			fw_type = NX_P3_CT_ROMIMAGE;
		break;

	case NX_P3_MN_ROMIMAGE:
		fw_type = NX_P3_CT_ROMIMAGE;
		break;

	case NX_P2_MN_ROMIMAGE:
	case NX_P3_CT_ROMIMAGE:
	default:
		fw_type = NX_FLASH_ROMIMAGE;
		break;
	}

	adapter->fw_type = fw_type;
}

static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
	u32 capability, flashed_ver;
	capability = 0;

	/* NX2031 always had MN */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 1;

	netxen_rom_fast_read(adapter,
			NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
	flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);

	if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {

		capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
		if (capability & NX_PEG_TUNE_MN_PRESENT)
			return 1;
	}
	return 0;
}

void netxen_request_firmware(struct netxen_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int rc = 0;

	adapter->fw_type = NX_UNKNOWN_ROMIMAGE;

next:
	nx_get_next_fwtype(adapter);

	if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
		adapter->fw = NULL;
	} else {
		rc = request_firmware(&adapter->fw,
				fw_name[adapter->fw_type], &pdev->dev);
		if (rc != 0)
			goto next;

		rc = netxen_validate_firmware(adapter);
		if (rc != 0) {
			release_firmware(adapter->fw);
			msleep(1);
			goto next;
		}
	}
}


void
netxen_release_firmware(struct netxen_adapter *adapter)
{
	release_firmware(adapter->fw);
	adapter->fw = NULL;
}

int netxen_init_dummy_dma(struct netxen_adapter *adapter)
{
	u64 addr;
	u32 hi, lo;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 0;

	adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
				 NETXEN_HOST_DUMMY_DMA_SIZE,
				 &adapter->dummy_dma.phys_addr);
	if (adapter->dummy_dma.addr == NULL) {
		dev_err(&adapter->pdev->dev,
			"ERROR: Could not allocate dummy DMA memory\n");
		return -ENOMEM;
	}

	addr = (uint64_t) adapter->dummy_dma.phys_addr;
	hi = (addr >> 32) & 0xffffffff;
	lo = addr & 0xffffffff;

	NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
	NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);

	return 0;
}
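
/*
 * The dummy DMA buffer allocated above is handed to the NX2031 firmware via
 * CRB_HOST_DUMMY_BUF_ADDR_HI/LO, so it cannot be freed while the firmware's
 * DMA watchdog is still active.  netxen_free_dummy_dma() therefore requests
 * a watchdog disable and only releases the buffer once the enable bit reads
 * back as clear; otherwise the memory is intentionally left allocated and an
 * error is logged.
 */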

/*
 * NetXen DMA watchdog control:
 *
 * Bit 0	: enabled => R/O: 1 watchdog active, 0 inactive
 * Bit 1	: disable_request => 1 req disable dma watchdog
 * Bit 2	: enable_request => 1 req enable dma watchdog
 * Bit 3-31	: unused
 */
void netxen_free_dummy_dma(struct netxen_adapter *adapter)
{
	int i = 100;
	u32 ctrl;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return;

	if (!adapter->dummy_dma.addr)
		return;

	ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
	if ((ctrl & 0x1) != 0) {
		NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));

		while ((ctrl & 0x1) != 0) {

			msleep(50);

			ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);

			if (--i == 0)
				break;
		}
	}

	if (i) {
		pci_free_consistent(adapter->pdev,
			    NETXEN_HOST_DUMMY_DMA_SIZE,
			    adapter->dummy_dma.addr,
			    adapter->dummy_dma.phys_addr);
		adapter->dummy_dma.addr = NULL;
	} else
		dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
}

int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (pegtune_val)
		return 0;

	do {
		val = NXRD32(adapter, CRB_CMDPEG_STATE);
		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return 0;
		case PHAN_INITIALIZE_FAILED:
			goto out_err;
		default:
			break;
		}

		msleep(500);

	} while (--retries);

	NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);

out_err:
	dev_warn(&adapter->pdev->dev, "firmware init failed\n");
	return -EIO;
}

static int
netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
	u32 val = 0;
	int retries = 2000;

	do {
		val = NXRD32(adapter, CRB_RCVPEG_STATE);

		if (val == PHAN_PEG_RCV_INITIALIZED)
			return 0;

		msleep(10);

	} while (--retries);

	pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
	return -EIO;
}

int netxen_init_firmware(struct netxen_adapter *adapter)
{
	int err;

	err = netxen_receive_peg_ready(adapter);
	if (err)
		return err;

	NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);

	return err;
}
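
/*
 * Firmware link-event messages pack their payload into 64-bit words:
 * body[1] carries the cable OUI (bits 0-31), cable length (bits 32-47) and
 * link speed (bits 48-63); body[2] carries link status, module type, duplex
 * and autoneg in its low four bytes.  The handler below unpacks these and
 * pushes the result to the netdev layer.
 */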
static void
netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
{
	u32 cable_OUI;
	u16 cable_len;
	u16 link_speed;
	u8 link_status, module, duplex, autoneg;
	struct net_device *netdev = adapter->netdev;

	adapter->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
		printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
				netdev->name, cable_OUI, cable_len);
	} else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
		printk(KERN_INFO "%s: unsupported cable length %d\n",
				netdev->name, cable_len);
	}

	/* update link parameters */
	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->link_duplex = DUPLEX_FULL;
	else
		adapter->link_duplex = DUPLEX_HALF;
	adapter->module_type = module;
	adapter->link_autoneg = autoneg;
	adapter->link_speed = link_speed;

	netxen_advert_link_change(adapter, link_status);
}

static void
netxen_handle_fw_message(int desc_cnt, int index,
		struct nx_host_sds_ring *sds_ring)
{
	nx_fw_msg_t msg;
	struct status_desc *desc;
	int i = 0, opcode;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	opcode = netxen_get_nic_msg_opcode(msg.body[0]);
	switch (opcode) {
	case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		netxen_handle_linkevent(sds_ring->adapter, &msg);
		break;
	default:
		break;
	}
}

static int
netxen_alloc_rx_skb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring,
		struct netxen_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!buffer->skb)
		return 1;

	skb = buffer->skb;

	if (!adapter->ahw.cut_through)
		skb_reserve(skb, 2);

	dma = pci_map_single(pdev, skb->data,
			rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb_any(skb);
		buffer->skb = NULL;
		return 1;
	}

	buffer->skb = skb;
	buffer->dma = dma;
	buffer->state = NETXEN_BUFFER_BUSY;

	return 0;
}

static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
	    && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	buffer->skb = NULL;
no_skb:
	buffer->state = NETXEN_BUFFER_FREE;
	return skb;
}
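
/*
 * Each status-ring entry describes one completed receive: the first 64-bit
 * word (sts_data0) encodes the source ring type, the buffer's reference
 * handle, the total packet length, the hardware checksum status and any
 * packet offset, all extracted below through the netxen_get_sts_* accessors.
 */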
static struct netxen_rx_buffer *
netxen_process_rcv(struct netxen_adapter *adapter,
		struct nx_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	struct nx_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = netxen_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = netxen_get_sts_totallength(sts_data0);
	cksum = netxen_get_sts_status(sts_data0);
	pkt_offset = netxen_get_sts_pkt_offset(sts_data0);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);


	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	skb->protocol = eth_type_trans(skb, netdev);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define TCP_HDR_SIZE		20
#define TCP_TS_OPTION_SIZE	12
#define TCP_TS_HDR_SIZE		(TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)

static struct netxen_rx_buffer *
netxen_process_lro(struct netxen_adapter *adapter,
		struct nx_host_sds_ring *sds_ring,
		int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	struct nx_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct tcphdr *th;
	bool push, timestamp;
	int l2_hdr_offset, l4_hdr_offset;
	int index;
	u16 lro_length, length, data_offset;
	u32 seq_number;
	u8 vhdr_len = 0;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = netxen_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = netxen_get_lro_sts_timestamp(sts_data0);
	lro_length = netxen_get_lro_sts_length(sts_data0);
	l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
	push = netxen_get_lro_sts_push_flag(sts_data0);
	seq_number = netxen_get_lro_sts_seq_number(sts_data1);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (timestamp)
		data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);

	skb_pull(skb, l2_hdr_offset);
	skb->protocol = eth_type_trans(skb, netdev);

	if (skb->protocol == htons(ETH_P_8021Q))
		vhdr_len = VLAN_HLEN;
	iph = (struct iphdr *)(skb->data + vhdr_len);
	th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));

	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
	csum_replace2(&iph->check, iph->tot_len, htons(length));
	iph->tot_len = htons(length);
	th->psh = push;
	th->seq = htonl(seq_number);

	length = skb->len;

	if (adapter->flags & NETXEN_FW_MSS_CAP)
		skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define netxen_merge_rx_buffers(list, head) \
	do { list_splice_tail_init(list, head); } while (0);
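
/*
 * Process up to @max completions on one status ring: walk descriptors owned
 * by the host, dispatch each to the plain, LRO or firmware-message handler,
 * flip ownership back to the Phantom, then refill the RDS rings from the
 * per-status-ring free lists and advance the consumer index.
 */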
int
netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
{
	struct netxen_adapter *adapter = sds_ring->adapter;

	struct list_head *cur;

	struct status_desc *desc;
	struct netxen_rx_buffer *rxbuf;

	u32 consumer = sds_ring->consumer;

	int count = 0;
	u64 sts_data0, sts_data1;
	int opcode, ring = 0, desc_cnt;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = netxen_get_sts_desc_cnt(sts_data0);

		opcode = netxen_get_sts_opcode(sts_data0);

		switch (opcode) {
		case NETXEN_NIC_RXPKT_DESC:
		case NETXEN_OLD_RXPKT_DESC:
		case NETXEN_NIC_SYN_OFFLOAD:
			ring = netxen_get_sts_type(sts_data0);
			rxbuf = netxen_process_rcv(adapter, sds_ring,
					ring, sts_data0);
			break;
		case NETXEN_NIC_LRO_DESC:
			ring = netxen_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = netxen_process_lro(adapter, sds_ring,
					ring, sts_data0, sts_data1);
			break;
		case NETXEN_NIC_RESPONSE_DESC:
			netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (rxbuf)
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);

skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] =
				cpu_to_le64(STATUS_OWNER_PHANTOM);
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		struct nx_host_rds_ring *rds_ring =
			&adapter->recv_ctx.rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur,
						struct netxen_rx_buffer, list);
				netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			netxen_merge_rx_buffers(&sds_ring->free_list[ring],
						&rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		netxen_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
	}

	return count;
}

/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct netxen_skb_frag *frag;
	int done = 0;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;

	if (!spin_trylock_bh(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;	/* Get the next frag */
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	tx_ring->sw_consumer = sw_consumer;

	if (count && netif_running(netdev)) {
		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_wake_queue(netdev);
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock_bh(&adapter->tx_clean_lock);

	return done;
}

void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
	struct nx_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int producer, count = 0;
	netxen_ctx_msg msg = 0;
	struct list_head *head;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);

		if (!buffer->skb) {
			if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor  */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		NXWRIO(adapter, rds_ring->crb_rcv_producer,
				(producer-1) & (rds_ring->num_desc-1));

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			/*
			 * Write a doorbell msg to tell phanmon of change in
			 * receive ring producer
			 * Only for firmware version < 4.0.0
			 */
			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
			netxen_set_msg_privid(msg);
			netxen_set_msg_count(msg,
					     ((producer - 1) &
					      (rds_ring->num_desc - 1)));
			netxen_set_msg_ctxid(msg, adapter->portnum);
			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
			NXWRIO(adapter, DB_NORMALIZE(adapter,
					NETXEN_RCV_PRODUCER_OFFSET), msg);
		}
	}
}
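
/*
 * Same refill as netxen_post_rx_buffers(), but without the P2 doorbell
 * message: this variant runs from the receive-processing path in
 * netxen_process_rcv_ring(), so it only trylocks the ring and silently
 * backs off if the lock is contended.
 */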
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int producer, count = 0;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);

		if (!buffer->skb) {
			if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor  */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		NXWRIO(adapter, rds_ring->crb_rcv_producer,
				(producer - 1) & (rds_ring->num_desc - 1));
	}
	spin_unlock(&rds_ring->lock);
}

void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}