/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "netxen_nic_hw.h"

#include "netxen_nic.h"

#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>

MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);

char netxen_nic_driver_name[] = "netxen_nic";
static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
	NETXEN_NIC_LINUX_VERSIONID;

static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;

/* Default to restricted 1G auto-neg mode */
static int wol_port_mode = 5;

static int use_msi = 1;

static int use_msi_x = 1;

static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int netxen_nic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void netxen_nic_remove(struct pci_dev *pdev);
static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
		struct net_device *);
static void netxen_tx_timeout(struct net_device *netdev);
static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_fw_poll_work(struct work_struct *work);
static void netxen_schedule_work(struct netxen_adapter *adapter,
		work_func_t func, int delay);
static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
static int netxen_nic_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev);
#endif

static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);

static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);

static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
		struct rtnl_link_stats64 *stats);
static int netxen_nic_set_mac(struct net_device *netdev, void *p);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

static const struct pci_device_id netxen_pci_tbl[] = {
	ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
	ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
	ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
	ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
	ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
	ENTRY(PCI_DEVICE_ID_NX3031),
	{0,}
};

MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);

static uint32_t crb_cmd_producer[4] = {
	CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
	CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
};

void
netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
		struct nx_host_tx_ring *tx_ring)
{
	NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
}

static uint32_t crb_cmd_consumer[4] = {
	CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
	CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
};

static inline void
netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
		struct nx_host_tx_ring *tx_ring)
{
	NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
}

static uint32_t msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;

static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
{
	struct netxen_adapter *adapter = sds_ring->adapter;

	NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
}

static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
{
	struct netxen_adapter *adapter = sds_ring->adapter;

	NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);

	if (!NETXEN_IS_MSI_FAMILY(adapter))
		NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
}

static int
netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct nx_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
{
	if (recv_ctx->sds_rings != NULL)
		kfree(recv_ctx->sds_rings);

	recv_ctx->sds_rings = NULL;
}

static int
netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_add(netdev, &sds_ring->napi,
				netxen_nic_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

static void
netxen_napi_del(struct netxen_adapter *adapter)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	netxen_free_sds_rings(&adapter->recv_ctx);
}

static void
netxen_napi_enable(struct netxen_adapter *adapter)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		netxen_nic_enable_int(sds_ring);
	}
}

static void
netxen_napi_disable(struct netxen_adapter *adapter)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netxen_nic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

static int nx_set_dma_mask(struct netxen_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	uint64_t mask, cmask;

	adapter->pci_using_dac = 0;

	mask = DMA_BIT_MASK(32);
	cmask = DMA_BIT_MASK(32);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
#ifndef CONFIG_IA64
		mask = DMA_BIT_MASK(35);
#endif
	} else {
		mask = DMA_BIT_MASK(39);
		cmask = mask;
	}

	if (pci_set_dma_mask(pdev, mask) == 0 &&
		pci_set_consistent_dma_mask(pdev, cmask) == 0) {
		adapter->pci_using_dac = 1;
		return 0;
	}

	return -EIO;
}

/* Update addressable range if firmware supports it */
static int
nx_update_dma_mask(struct netxen_adapter *adapter)
{
	int change, shift, err;
	uint64_t mask, old_mask, old_cmask;
	struct pci_dev *pdev = adapter->pdev;

	change = 0;

	shift = NXRD32(adapter, CRB_DMA_SHIFT);
	if (shift > 32)
		return 0;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
		change = 1;
	else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
		change = 1;

	if (change) {
		old_mask = pdev->dma_mask;
		old_cmask = pdev->dev.coherent_dma_mask;

		mask = DMA_BIT_MASK(32+shift);

		err = pci_set_dma_mask(pdev, mask);
		if (err)
			goto err_out;

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {

			err = pci_set_consistent_dma_mask(pdev, mask);
			if (err)
				goto err_out;
		}
		dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
	}

	return 0;

err_out:
	pci_set_dma_mask(pdev, old_mask);
	pci_set_consistent_dma_mask(pdev, old_cmask);
	return err;
}

static int
netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
{
	u32 val, timeout;

	if (first_boot == 0x55555555) {
		/* This is the first boot after power up */
		NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);

		if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
			return 0;

		/* PCI bus master workaround */
		first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
		if (!(first_boot & 0x4)) {
			first_boot |= 0x4;
			NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
			NXRD32(adapter, NETXEN_PCIE_REG(0x4));
		}

		/* This is the first boot after power up */
		first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
		if (first_boot != 0x80000f) {
			/* clear the register for future unloads/loads */
			NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
			return -EIO;
		}

		/* Start P2 boot loader */
		val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
		NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
		timeout = 0;
		do {
			msleep(1);
			val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));

			if (++timeout > 5000)
				return -EIO;

		} while (val == NETXEN_BDINFO_MAGIC);
	}
	return 0;
}

static void netxen_set_port_mode(struct netxen_adapter *adapter)
{
	u32 val, data;

	val = adapter->ahw.board_type;
	if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
		(val == NETXEN_BRDTYPE_P3_XG_LOM)) {
		if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
			data = NETXEN_PORT_MODE_802_3_AP;
			NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
		} else if (port_mode == NETXEN_PORT_MODE_XG) {
			data = NETXEN_PORT_MODE_XG;
			NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
		} else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
			data = NETXEN_PORT_MODE_AUTO_NEG_1G;
			NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
		} else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
			data = NETXEN_PORT_MODE_AUTO_NEG_XG;
			NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
		} else {
			data = NETXEN_PORT_MODE_AUTO_NEG;
			NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
		}

		if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
			(wol_port_mode != NETXEN_PORT_MODE_XG) &&
			(wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
			(wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
			wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
		}
		NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
	}
}

#define PCI_CAP_ID_GEN 0x10

static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
{
	u32 pdevfuncsave;
	u32 c8c9value = 0;
	u32 chicken = 0;
	u32 control = 0;
	int i, pos;
	struct pci_dev *pdev;

	pdev = adapter->pdev;

	chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3));
	/* clear chicken3.25:24 */
	chicken &= 0xFCFFFFFF;
	/*
	 * Gen2 parts get 0xF1000; Gen1 B0 parts get 0xF1020;
	 * other Gen1 parts are left unchanged.
	 */
	pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
	if (pos == 0xC0) {
		pci_read_config_dword(pdev, pos + 0x10, &control);
		if ((control & 0x000F0000) != 0x00020000) {
			/* set chicken3.24 if gen1 */
			chicken |= 0x01000000;
		}
		dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n");
		c8c9value = 0xF1000;
	} else {
		/* set chicken3.24 if gen1 */
		chicken |= 0x01000000;
		dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n");
		if (adapter->ahw.revision_id == NX_P3_B0)
			c8c9value = 0xF1020;
		else
			c8c9value = 0;
	}

	NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken);

	if (!c8c9value)
		return;

	pdevfuncsave = pdev->devfn;
	if (pdevfuncsave & 0x07)
		return;

	for (i = 0; i < 8; i++) {
		pci_read_config_dword(pdev, pos + 8, &control);
		pci_read_config_dword(pdev, pos + 8, &control);
		pci_write_config_dword(pdev, pos + 8, c8c9value);
		pdev->devfn++;
	}
	pdev->devfn = pdevfuncsave;
}

static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;

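	/*
	 * pdev->msix_cap is the config-space offset of the MSI-X capability
	 * (0 if the function has none); toggle its enable bit directly.
	 */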
	if (pdev->msix_cap) {
		pci_read_config_dword(pdev, pdev->msix_cap, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pdev->msix_cap, control);
	}
}

static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		adapter->msix_entries[i].entry = i;
}

static int
netxen_read_mac_addr(struct netxen_adapter *adapter)
{
	int i;
	unsigned char *p;
	u64 mac_addr;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
			return -EIO;
	} else {
		if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
			return -EIO;
	}

	p = (unsigned char *)&mac_addr;
	for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = *(p + 5 - i);

	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->dev_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);

	return 0;
}

static int netxen_nic_set_mac(struct net_device *netdev, void *p)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		netxen_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	adapter->macaddr_set(adapter, addr->sa_data);

	if (netif_running(netdev)) {
		netif_device_attach(netdev);
		netxen_napi_enable(adapter);
	}
	return 0;
}

static void netxen_set_multicast_list(struct net_device *dev)
{
	struct netxen_adapter *adapter = netdev_priv(dev);

	adapter->set_multi(dev);
}

static netdev_features_t netxen_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_RXCSUM)) {
		netdev_info(dev, "disabling LRO as RXCSUM is off\n");

		features &= ~NETIF_F_LRO;
	}

	return features;
}

static int netxen_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netxen_adapter *adapter = netdev_priv(dev);
	int hw_lro;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
			: NETXEN_NIC_LRO_DISABLED;

	if (netxen_config_hw_lro(adapter, hw_lro))
		return -EIO;

	if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
		return -EIO;

	return 0;
}

static const struct net_device_ops netxen_netdev_ops = {
	.ndo_open	   = netxen_nic_open,
	.ndo_stop	   = netxen_nic_close,
	.ndo_start_xmit    = netxen_nic_xmit_frame,
	.ndo_get_stats64   = netxen_nic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode   = netxen_set_multicast_list,
	.ndo_set_mac_address    = netxen_nic_set_mac,
	.ndo_change_mtu	   = netxen_nic_change_mtu,
	.ndo_tx_timeout	   = netxen_tx_timeout,
	.ndo_fix_features = netxen_fix_features,
	.ndo_set_features = netxen_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = netxen_nic_poll_controller,
#endif
};

static inline bool netxen_function_zero(struct pci_dev *pdev)
{
	return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
}

static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
					     u32 mode)
{
	NXWR32(adapter, NETXEN_INTR_MODE_REG, mode);
}

static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter)
{
	return NXRD32(adapter, NETXEN_INTR_MODE_REG);
}

static void
netxen_initialize_interrupt_registers(struct netxen_adapter *adapter)
{
	struct netxen_legacy_intr_set *legacy_intrp;
	u32 tgt_status_reg, int_state_reg;

	if (adapter->ahw.revision_id >= NX_P3_B0)
		legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
	else
		legacy_intrp = &legacy_intr[0];

	tgt_status_reg = legacy_intrp->tgt_status_reg;
	int_state_reg = ISR_INT_STATE_REG;

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg);
	adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
						  legacy_intrp->tgt_mask_reg);
	adapter->pci_int_reg = netxen_get_ioaddr(adapter,
						 legacy_intrp->pci_int_reg);
	adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);

	if (adapter->ahw.revision_id >= NX_P3_B1)
		adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
							       int_state_reg);
	else
		adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
							       CRB_INT_VECTOR);
}

static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
				       int num_msix)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 value;
	int err;

	if (adapter->msix_supported) {
		netxen_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix_range(pdev, adapter->msix_entries,
					    num_msix, num_msix);
		if (err > 0) {
			adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
			netxen_set_msix_bit(pdev, 1);

			if (adapter->rss_supported)
				adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return 0;
		}
		/* fall through for msi */
	}

	if (use_msi && !pci_enable_msi(pdev)) {
		value = msi_tgt_status[adapter->ahw.pci_func];
		adapter->flags |= NETXEN_NIC_MSI_ENABLED;
		adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value);
		adapter->msix_entries[0].vector = pdev->irq;
		dev_info(&pdev->dev, "using msi interrupts\n");
		return 0;
	}

	dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n");
	return -EIO;
}

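/*
 * Select the interrupt mode for this function. Only PCI function 0
 * negotiates MSI-X/MSI vs. INTx; the other functions follow the mode
 * it recorded in NETXEN_INTR_MODE_REG.
 */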
static int netxen_setup_intr(struct netxen_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int num_msix;

	if (adapter->rss_supported)
		num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
			MSIX_ENTRIES_PER_ADAPTER : 2;
	else
		num_msix = 1;

	adapter->max_sds_rings = 1;
	adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);

	netxen_initialize_interrupt_registers(adapter);
	netxen_set_msix_bit(pdev, 0);

	if (netxen_function_zero(pdev)) {
		if (!netxen_setup_msi_interrupts(adapter, num_msix))
			netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
		else
			netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
	} else {
		if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
		    netxen_setup_msi_interrupts(adapter, num_msix)) {
			dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n");
			return -EIO;
		}
	}

	if (!NETXEN_IS_MSI_FAMILY(adapter)) {
		adapter->msix_entries[0].vector = pdev->irq;
		dev_info(&pdev->dev, "using legacy interrupts\n");
	}
	return 0;
}

static void
netxen_teardown_intr(struct netxen_adapter *adapter)
{
	if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
		pci_disable_msix(adapter->pdev);
	if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
		pci_disable_msi(adapter->pdev);
}

static void
netxen_cleanup_pci_map(struct netxen_adapter *adapter)
{
	if (adapter->ahw.db_base != NULL)
		iounmap(adapter->ahw.db_base);
	if (adapter->ahw.pci_base0 != NULL)
		iounmap(adapter->ahw.pci_base0);
	if (adapter->ahw.pci_base1 != NULL)
		iounmap(adapter->ahw.pci_base1);
	if (adapter->ahw.pci_base2 != NULL)
		iounmap(adapter->ahw.pci_base2);
}

static int
netxen_setup_pci_map(struct netxen_adapter *adapter)
{
	void __iomem *db_ptr = NULL;

	resource_size_t mem_base, db_base;
	unsigned long mem_len, db_len = 0;

	struct pci_dev *pdev = adapter->pdev;
	int pci_func = adapter->ahw.pci_func;
	struct netxen_hardware_context *ahw = &adapter->ahw;

	int err = 0;

	/*
	 * Set the CRB window to invalid. If any register in window 0 is
	 * accessed it should set the window to 0 and then reset it to 1.
	 */
	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	/* 128 Meg of memory */
	if (mem_len == NETXEN_PCI_128MB_SIZE) {

		ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
		ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
				SECOND_PAGE_GROUP_SIZE);
		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
				THIRD_PAGE_GROUP_SIZE);
		if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
				ahw->pci_base2 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			err = -EIO;
			goto err_out;
		}

		ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;

	} else if (mem_len == NETXEN_PCI_32MB_SIZE) {

		ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
			SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
		if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			err = -EIO;
			goto err_out;
		}

	} else if (mem_len == NETXEN_PCI_2MB_SIZE) {

		ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
		if (ahw->pci_base0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		ahw->pci_len0 = mem_len;
	} else {
		return -EIO;
	}

	netxen_setup_hwops(adapter);

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
		adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
			NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));

	} else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
			NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		goto skip_doorbell;

	db_base = pci_resource_start(pdev, 4);	/* doorbell is on bar 4 */
	db_len = pci_resource_len(pdev, 4);

	if (db_len == 0) {
		printk(KERN_ERR "%s: doorbell is disabled\n",
				netxen_nic_driver_name);
		err = -EIO;
		goto err_out;
	}

	db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
	if (!db_ptr) {
		printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
				netxen_nic_driver_name);
		err = -EIO;
		goto err_out;
	}

skip_doorbell:
	adapter->ahw.db_base = db_ptr;
	adapter->ahw.db_len = db_len;
	return 0;

err_out:
	netxen_cleanup_pci_map(adapter);
	return err;
}

static void
netxen_check_options(struct netxen_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build, prev_fw_version;
	char brd_name[NETXEN_MAX_SHORT_NAME];
	char serial_num[32];
	int i, offset, val, err;
	__le32 *ptr32;
	struct pci_dev *pdev = adapter->pdev;

	adapter->driver_mismatch = 0;

	ptr32 = (__le32 *)&serial_num;
	offset = NX_FW_SERIAL_NUM_OFFSET;
	for (i = 0; i < 8; i++) {
		if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
			dev_err(&pdev->dev, "error reading board info\n");
			adapter->driver_mismatch = 1;
			return;
		}
		ptr32[i] = cpu_to_le32(val);
		offset += sizeof(u32);
	}

	fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
	fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
	fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
	prev_fw_version = adapter->fw_version;
	adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);

	/* Get FW Mini Coredump template and store it */
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (adapter->mdump.md_template == NULL ||
				adapter->fw_version > prev_fw_version) {
			kfree(adapter->mdump.md_template);
			adapter->mdump.md_template = NULL;
			err = netxen_setup_minidump(adapter);
			if (err)
				dev_err(&adapter->pdev->dev,
					"Failed to setup minidump rcode = %d\n", err);
		}
	}

	if (adapter->portnum == 0) {
		if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
						    brd_name))
			strcpy(serial_num, "Unknown");

		pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, serial_num, adapter->ahw.revision_id);
	}

	if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
		adapter->driver_mismatch = 1;
		dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
				fw_major, fw_minor, fw_build);
		return;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		i = NXRD32(adapter, NETXEN_SRE_MISC);
		adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
	}

	dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
		 NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
		 adapter->ahw.cut_through ? "cut-through" : "legacy");

	if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
		adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);

	if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	} else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = 0;
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		adapter->msix_supported = !!use_msi_x;
		adapter->rss_supported = !!use_msi_x;
	} else {
		u32 flashed_ver = 0;
		netxen_rom_fast_read(adapter,
				NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
		flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);

		if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
			switch (adapter->ahw.board_type) {
			case NETXEN_BRDTYPE_P2_SB31_10G:
			case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
				adapter->msix_supported = !!use_msi_x;
				adapter->rss_supported = !!use_msi_x;
				break;
			default:
				break;
			}
		}
	}

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
		adapter->max_rds_rings = 3;
	} else {
		adapter->num_lro_rxd = 0;
		adapter->max_rds_rings = 2;
	}
}

static int
netxen_start_firmware(struct netxen_adapter *adapter)
{
	int val, err, first_boot;
	struct pci_dev *pdev = adapter->pdev;

	/* required for NX2031 dummy dma */
	err = nx_set_dma_mask(adapter);
	if (err)
		return err;

	err = netxen_can_start_firmware(adapter);

	if (err < 0)
		return err;

	if (!err)
		goto wait_init;

	first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));

	err = netxen_check_hw_init(adapter, first_boot);
	if (err) {
		dev_err(&pdev->dev, "error in HW init sequence\n");
		return err;
	}

	netxen_request_firmware(adapter);

	err = netxen_need_fw_reset(adapter);
	if (err < 0)
		goto err_out;
	if (err == 0)
		goto pcie_strap_init;

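	/*
	 * Not a fresh power-on (the scratch register no longer reads the
	 * power-up pattern): clear the command peg state and replay the
	 * ROM initialization sequence before reloading firmware.
	 */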
	if (first_boot != 0x55555555) {
		NXWR32(adapter, CRB_CMDPEG_STATE, 0);
		netxen_pinit_from_rom(adapter);
		msleep(1);
	}

	NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
	NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
	NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_set_port_mode(adapter);

	err = netxen_load_firmware(adapter);
	if (err)
		goto err_out;

	netxen_release_firmware(adapter);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {

		/* Initialize multicast addr pool owners */
		val = 0x7654;
		if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
			val |= 0x0f000000;
		NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);

	}

	err = netxen_init_dummy_dma(adapter);
	if (err)
		goto err_out;

	/*
	 * Tell the hardware our version number.
	 */
	val = (_NETXEN_NIC_LINUX_MAJOR << 16)
		| ((_NETXEN_NIC_LINUX_MINOR << 8))
		| (_NETXEN_NIC_LINUX_SUBVERSION);
	NXWR32(adapter, CRB_DRIVER_VERSION, val);

pcie_strap_init:
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_pcie_strap_init(adapter);

wait_init:
	/* Handshake with the card before we register the devices. */
	err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
	if (err) {
		netxen_free_dummy_dma(adapter);
		goto err_out;
	}

	NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);

	nx_update_dma_mask(adapter);

	netxen_check_options(adapter);

	adapter->need_fw_reset = 0;

	/* fall through and release firmware */

err_out:
	netxen_release_firmware(adapter);
	return err;
}

static int
netxen_nic_request_irq(struct netxen_adapter *adapter)
{
	irq_handler_t handler;
	struct nx_host_sds_ring *sds_ring;
	int err, ring;

	unsigned long flags = 0;
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
		handler = netxen_msix_intr;
	else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
		handler = netxen_msi_intr;
	else {
		flags |= IRQF_SHARED;
		handler = netxen_intr;
	}
	adapter->irq = netdev->irq;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
		err = request_irq(sds_ring->irq, handler,
				  flags, sds_ring->name, sds_ring);
		if (err)
			return err;
	}

	return 0;
}

static void
netxen_nic_free_irq(struct netxen_adapter *adapter)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		free_irq(sds_ring->irq, sds_ring);
	}
}

static void
netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
{
	adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
	adapter->coal.normal.data.rx_time_us =
		NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
	adapter->coal.normal.data.rx_packets =
		NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
	adapter->coal.normal.data.tx_time_us =
		NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
	adapter->coal.normal.data.tx_packets =
		NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
}

/* with rtnl_lock */
static int
__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
{
	int err;

	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return -EIO;

	err = adapter->init_port(adapter, adapter->physical_port);
	if (err) {
		printk(KERN_ERR "%s: Failed to initialize port %d\n",
				netxen_nic_driver_name, adapter->portnum);
		return err;
	}
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		adapter->macaddr_set(adapter, adapter->mac_addr);

	adapter->set_multi(netdev);
	adapter->set_mtu(adapter, netdev->mtu);

	adapter->ahw.linkup = 0;

	if (adapter->max_sds_rings > 1)
		netxen_config_rss(adapter, 1);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_config_intr_coalesce(adapter);

	if (netdev->features & NETIF_F_LRO)
		netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);

	netxen_napi_enable(adapter);

	if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
		netxen_linkevent_request(adapter, 1);
	else
		netxen_nic_set_link_parameters(adapter);

	set_bit(__NX_DEV_UP, &adapter->state);
	return 0;
}

/* Used during the resume and firmware recovery paths. */

static inline int
netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();
	if (netif_running(netdev))
		err = __netxen_nic_up(adapter, netdev);
	rtnl_unlock();

	return err;
}

/* with rtnl_lock */
static void
__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
		return;

	smp_mb();
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
		netxen_linkevent_request(adapter, 0);

	if (adapter->stop_port)
		adapter->stop_port(adapter);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_p3_free_mac_list(adapter);

	adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);

	netxen_napi_disable(adapter);

	netxen_release_tx_buffers(adapter);
}

/* Used during the suspend and firmware recovery paths. */

static inline void
netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__netxen_nic_down(adapter, netdev);
	rtnl_unlock();

}

static int
netxen_nic_attach(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err, ring;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_tx_ring *tx_ring;
	u32 capab2;

	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
		return 0;

	err = netxen_init_firmware(adapter);
	if (err)
		return err;

	adapter->flags &= ~NETXEN_FW_MSS_CAP;
	if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) {
		capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2);
		if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
			adapter->flags |= NETXEN_FW_MSS_CAP;
	}

	err = netxen_napi_add(adapter, netdev);
	if (err)
		return err;

	err = netxen_alloc_sw_resources(adapter);
	if (err) {
		printk(KERN_ERR "%s: Error in setting sw resources\n",
				netdev->name);
		return err;
	}

	err = netxen_alloc_hw_resources(adapter);
	if (err) {
		printk(KERN_ERR "%s: Error in setting hw resources\n",
				netdev->name);
		goto err_out_free_sw;
	}

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		tx_ring = adapter->tx_ring;
		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
				crb_cmd_producer[adapter->portnum]);
		tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
				crb_cmd_consumer[adapter->portnum]);

		tx_ring->producer = 0;
		tx_ring->sw_consumer = 0;

		netxen_nic_update_cmd_producer(adapter, tx_ring);
		netxen_nic_update_cmd_consumer(adapter, tx_ring);
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		netxen_post_rx_buffers(adapter, ring, rds_ring);
	}

	err = netxen_nic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
				netdev->name);
		goto err_out_free_rxbuf;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_nic_init_coalesce_defaults(adapter);

	netxen_create_sysfs_entries(adapter);

	adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_rxbuf:
	netxen_release_rx_buffers(adapter);
	netxen_free_hw_resources(adapter);
err_out_free_sw:
	netxen_free_sw_resources(adapter);
	return err;
}

static void
netxen_nic_detach(struct netxen_adapter *adapter)
{
	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return;

	netxen_remove_sysfs_entries(adapter);

	netxen_free_hw_resources(adapter);
	netxen_release_rx_buffers(adapter);
	netxen_nic_free_irq(adapter);
	netxen_napi_del(adapter);
	netxen_free_sw_resources(adapter);

	adapter->is_up = 0;
}

int
netxen_nic_reset_context(struct netxen_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__NX_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__netxen_nic_down(adapter, netdev);

		netxen_nic_detach(adapter);

		if (netif_running(netdev)) {
			err = netxen_nic_attach(adapter);
			if (!err)
				err = __netxen_nic_up(adapter, netdev);

			if (err)
				goto done;
		}

		netif_device_attach(netdev);
	}

done:
	clear_bit(__NX_RESETTING, &adapter->state);
	return err;
}

static int
netxen_setup_netdev(struct netxen_adapter *adapter,
		struct net_device *netdev)
{
	int err = 0;
	struct pci_dev *pdev = adapter->pdev;

	adapter->mc_enabled = 0;
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		adapter->max_mc_count = 38;
	else
		adapter->max_mc_count = 16;

	netdev->netdev_ops = &netxen_netdev_ops;
	netdev->watchdog_timeo = 5*HZ;

	netxen_nic_change_mtu(netdev, netdev->mtu);

	netdev->ethtool_ops = &netxen_nic_ethtool_ops;

	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
			      NETIF_F_RXCSUM;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

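	/* VLAN devices stacked on this interface inherit the same offloads. */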
	netdev->vlan_features |= netdev->hw_features;

	if (adapter->pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;

	if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
		netdev->hw_features |= NETIF_F_LRO;

	netdev->features |= netdev->hw_features;

	netdev->irq = adapter->msix_entries[0].vector;

	INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);

	if (netxen_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}

#define NETXEN_ULA_ADAPTER_KEY		(0xdaddad01)
#define NETXEN_NON_ULA_ADAPTER_KEY	(0xdaddad00)

static void netxen_read_ula_info(struct netxen_adapter *adapter)
{
	u32 temp;

	/* Print ULA info only once for an adapter */
	if (adapter->portnum != 0)
		return;

	temp = NXRD32(adapter, NETXEN_ULA_KEY);
	switch (temp) {
	case NETXEN_ULA_ADAPTER_KEY:
		dev_info(&adapter->pdev->dev, "ULA adapter");
		break;
	case NETXEN_NON_ULA_ADAPTER_KEY:
		dev_info(&adapter->pdev->dev, "non ULA adapter");
		break;
	default:
		break;
	}

	return;
}

#ifdef CONFIG_PCIEAER
static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *root = pdev->bus->self;
	u32 aer_pos;

	/* root bus? */
	if (!root)
		return;

	if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
		adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
		return;

	if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT)
		return;

	aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
	if (!aer_pos)
		return;

	pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
}
#endif

static int
netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct netxen_adapter *adapter = NULL;
	int i = 0, err;
	int pci_func_id = PCI_FUNC(pdev->devfn);
	uint8_t revision_id;
	u32 val;

	if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
		pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n",
			module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
		return -ENODEV;
	}

	if ((err = pci_enable_device(pdev)))
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
		goto err_out_disable_pdev;

	if (NX_IS_REVISION_P3(pdev->revision))
		pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct netxen_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev  = netdev;
	adapter->pdev    = pdev;
	adapter->ahw.pci_func  = pci_func_id;

	revision_id = pdev->revision;
	adapter->ahw.revision_id = revision_id;

	rwlock_init(&adapter->ahw.crb_lock);
	spin_lock_init(&adapter->ahw.mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);
	INIT_LIST_HEAD(&adapter->ip_list);

	err = netxen_setup_pci_map(adapter);
	if (err)
		goto err_out_free_netdev;

	/* This will be reset for mezz cards */
	adapter->portnum = pci_func_id;

	err = netxen_nic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCIEAER
	netxen_mask_aer_correctable(adapter);
#endif

	/* Mezz cards have PCI function 0,2,3 enabled */
	switch (adapter->ahw.board_type) {
	case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
	case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
		if (pci_func_id >= 2)
			adapter->portnum = pci_func_id - 2;
		break;
	default:
		break;
	}

	err = netxen_check_flash_fw_compatibility(adapter);
	if (err)
		goto err_out_iounmap;

	if (adapter->portnum == 0) {
		val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
		if (val != 0xffffffff && val != 0) {
			NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
			adapter->need_fw_reset = 1;
		}
	}

	err = netxen_start_firmware(adapter);
	if (err)
		goto err_out_decr_ref;

	/*
	 * See if the firmware gave us a virtual-physical port mapping.
	 */
	adapter->physical_port = adapter->portnum;
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		i = NXRD32(adapter, CRB_V2P(adapter->portnum));
		if (i != 0x55555555)
			adapter->physical_port = i;
	}

	netxen_nic_clear_stats(adapter);

	err = netxen_setup_intr(adapter);

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to setup interrupts, error = %d\n", err);
		goto err_out_disable_msi;
	}

	netxen_read_ula_info(adapter);

	err = netxen_setup_netdev(adapter, netdev);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw.port_type) {
	case NETXEN_NIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case NETXEN_NIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	netxen_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	netxen_teardown_intr(adapter);

	netxen_free_dummy_dma(adapter);

err_out_decr_ref:
	nx_decr_dev_ref_cnt(adapter);

err_out_iounmap:
	netxen_cleanup_pci_map(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

static void netxen_cleanup_minidump(struct netxen_adapter *adapter)
{
	kfree(adapter->mdump.md_template);
	adapter->mdump.md_template = NULL;

	if (adapter->mdump.md_capture_buff) {
		vfree(adapter->mdump.md_capture_buff);
		adapter->mdump.md_capture_buff = NULL;
	}
}

static void netxen_nic_remove(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	netxen_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	netxen_free_ip_list(adapter, false);
	netxen_nic_detach(adapter);

	nx_decr_dev_ref_cnt(adapter);

	if (adapter->portnum == 0)
		netxen_free_dummy_dma(adapter);

	clear_bit(__NX_RESETTING, &adapter->state);

	netxen_teardown_intr(adapter);
	netxen_set_interrupt_mode(adapter, 0);
	netxen_remove_diag_entries(adapter);

	netxen_cleanup_pci_map(adapter);

	netxen_release_firmware(adapter);

	if (NX_IS_REVISION_P3(pdev->revision)) {
		netxen_cleanup_minidump(adapter);
		pci_disable_pcie_error_reporting(pdev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(netdev);
}

static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	netxen_cancel_fw_work(adapter);

	if (netif_running(netdev))
		netxen_nic_down(adapter, netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	netxen_nic_detach(adapter);

	if (adapter->portnum == 0)
		netxen_free_dummy_dma(adapter);

	nx_decr_dev_ref_cnt(adapter);

	clear_bit(__NX_RESETTING, &adapter->state);
}

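/*
 * Re-enable the PCI device and bring the interface back up.  Shared by
 * the PCI error recovery (slot reset) and power-management resume paths.
 */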
static int netxen_nic_attach_func(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;

	err = netxen_start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = netxen_nic_attach(adapter);
		if (err)
			goto err_out;

		err = netxen_nic_up(adapter, netdev);
		if (err)
			goto err_out_detach;

		netxen_restore_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);
	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
	return 0;

err_out_detach:
	netxen_nic_detach(adapter);
err_out:
	nx_decr_dev_ref_cnt(adapter);
	return err;
}

static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (nx_dev_request_aer(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	netxen_nic_detach_func(adapter);

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
{
	int err = 0;

	err = netxen_nic_attach_func(pdev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static void netxen_io_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

static void netxen_nic_shutdown(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);

	netxen_nic_detach_func(adapter);

	if (pci_save_state(pdev))
		return;

	if (netxen_nic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	int retval;

	netxen_nic_detach_func(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (netxen_nic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int
netxen_nic_resume(struct pci_dev *pdev)
{
	return netxen_nic_attach_func(pdev);
}
#endif

static int netxen_nic_open(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (adapter->driver_mismatch)
		return -EIO;

	err = netxen_nic_attach(adapter);
	if (err)
		return err;

	err = __netxen_nic_up(adapter, netdev);
	if (err)
		goto err_out;

	netif_start_queue(netdev);

	return 0;

err_out:
	netxen_nic_detach(adapter);
	return err;
}

/*
 * netxen_nic_close - Disables a network interface entry point
 */
static int netxen_nic_close(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	__netxen_nic_down(adapter, netdev);
	return 0;
}

static void
netxen_tso_check(struct net_device *netdev,
		struct nx_host_tx_ring *tx_ring,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	u8 opcode = TX_ETHER_PKT;
	__be16 protocol = skb->protocol;
	u16 flags = 0, vid = 0;
	u32 producer;
	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;

	if (protocol == cpu_to_be16(ETH_P_8021Q)) {

		vh = (struct vlan_ethhdr *)skb->data;
		protocol = vh->h_vlan_encapsulated_proto;
		flags = FLAGS_VLAN_TAGGED;

	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vid = vlan_tx_tag_get(skb);
		netxen_set_tx_vlan_tci(first_desc, vid);
		vlan_oob = 1;
	}

	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
			skb_shinfo(skb)->gso_size > 0) {

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		if (vlan_oob) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;
			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;
		}

		opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
				TX_TCP_LSO6 : TX_TCP_LSO;
		tso = 1;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4proto;

		if (protocol == cpu_to_be16(ETH_P_IP)) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}

	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	netxen_set_tx_flags_opcode(first_desc, flags, opcode);

	if (!tso)
		return;

	/* For LSO, we need to copy the MAC/IP/TCP headers into
	 * the descriptor ring
	 */
	producer = tx_ring->producer;
	copied = 0;
	offset = 2;

	if (vlan_oob) {
		/* Create a TSO vlan header template for firmware */

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				hdr_len + VLAN_HLEN);

		vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
		skb_copy_from_linear_data(skb, vh, 12);
		vh->h_vlan_proto = htons(ETH_P_8021Q);
		vh->h_vlan_TCI = htons(vid);
		skb_copy_from_linear_data_offset(skb, 12,
				(char *)vh + 16, copy_len - 16);

		copied = copy_len - VLAN_HLEN;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	while (copied < hdr_len) {

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				(hdr_len - copied));

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		skb_copy_from_linear_data_offset(skb, copied,
				(char *)hwdesc + offset, copy_len);

		copied += copy_len;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	tx_ring->producer = producer;
	barrier();
}

static int
netxen_map_tx_skb(struct pci_dev *pdev,
		struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
{
	struct netxen_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data,
			skb_headlen(skb), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];

		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
		nf->dma = 0ULL;
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	nf->dma = 0ULL;

out_err:
	return -ENOMEM;
}

static inline void
netxen_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
}

static netdev_tx_t
netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_cmd_buffer *pbuf;
	struct netxen_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	int i, k;
	int delta = 0;
	struct skb_frag_struct *frag;

	u32 producer;
	int frag_count, no_of_desc;
	u32 num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {

		for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
			frag = &skb_shinfo(skb)->frags[i];
			delta += skb_frag_size(frag);
		}

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}
	/* 4 fragments per cmd des */
	no_of_desc = (frag_count + 3) >> 2;

	if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		smp_mb();
		if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
			netif_start_queue(netdev);
		else
			return NETDEV_TX_BUSY;
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];

	pdev = adapter->pdev;

	if (netxen_map_tx_skb(pdev, skb, pbuf))
		goto drop_packet;

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	first_desc = hwdesc = &tx_ring->desc_head[producer];
	netxen_clear_cmddesc((u64 *)hwdesc);

	netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
	netxen_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {

		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc. */
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			netxen_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];

		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);

	netxen_tso_check(netdev, tx_ring, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	netxen_nic_update_cmd_producer(adapter, tx_ring);

	return NETDEV_TX_OK;

drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static int netxen_nic_check_temp(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t temp, temp_state, temp_val;
	int rv = 0;

	temp = NXRD32(adapter, CRB_TEMP_STATE);

	temp_state = nx_get_temp_state(temp);
	temp_val = nx_get_temp_val(temp);

	if (temp_state == NX_TEMP_PANIC) {
		printk(KERN_ALERT
		       "%s: Device temperature %d degrees C exceeds"
		       " maximum allowed. Hardware has been shut down.\n",
		       netdev->name, temp_val);
		rv = 1;
	} else if (temp_state == NX_TEMP_WARN) {
		if (adapter->temp == NX_TEMP_NORMAL) {
			printk(KERN_ALERT
			       "%s: Device temperature %d degrees C "
			       "exceeds operating range."
			       " Immediate action needed.\n",
			       netdev->name, temp_val);
		}
	} else {
		if (adapter->temp == NX_TEMP_WARN) {
			printk(KERN_INFO
			       "%s: Device temperature is now %d degrees C"
			       " in normal range.\n", netdev->name,
			       temp_val);
		}
	}
	adapter->temp = temp_state;
	return rv;
}

void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw.linkup && !linkup) {
		printk(KERN_INFO "%s: %s NIC Link is down\n",
		       netxen_nic_driver_name, netdev->name);
		adapter->ahw.linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		adapter->link_changed = !adapter->has_link_events;
	} else if (!adapter->ahw.linkup && linkup) {
		printk(KERN_INFO "%s: %s NIC Link is up\n",
		       netxen_nic_driver_name, netdev->name);
		adapter->ahw.linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		adapter->link_changed = !adapter->has_link_events;
	}
}

static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
{
	u32 val, port, linkup;

	port = adapter->physical_port;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		val = NXRD32(adapter, CRB_XG_STATE_P3);
		val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
		linkup = (val == XG_LINK_UP_P3);
	} else {
		val = NXRD32(adapter, CRB_XG_STATE);
		val = (val >> port*8) & 0xff;
		linkup = (val == XG_LINK_UP);
	}

	netxen_advert_link_change(adapter, linkup);
}

static void netxen_tx_timeout(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__NX_RESETTING, &adapter->state))
		return;

	dev_err(&netdev->dev, "transmit timeout, resetting.\n");
	schedule_work(&adapter->tx_timeout_task);
}

static void netxen_tx_timeout_task(struct work_struct *work)
{
	struct netxen_adapter *adapter =
		container_of(work, struct netxen_adapter, tx_timeout_task);

	if (!netif_running(adapter->netdev))
		return;

	if (test_and_set_bit(__NX_RESETTING, &adapter->state))
		return;

	if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
		goto request_reset;

	rtnl_lock();
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		/* try to scrub interrupt */
		netxen_napi_disable(adapter);

		netxen_napi_enable(adapter);

		netif_wake_queue(adapter->netdev);

		clear_bit(__NX_RESETTING, &adapter->state);
	} else {
		clear_bit(__NX_RESETTING, &adapter->state);
		if (netxen_nic_reset_context(adapter)) {
			rtnl_unlock();
			goto request_reset;
		}
	}
	adapter->netdev->trans_start = jiffies;
	rtnl_unlock();
	return;

request_reset:
	adapter->need_fw_reset = 1;
	clear_bit(__NX_RESETTING, &adapter->state);
}

static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
						      struct rtnl_link_stats64 *stats)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
	stats->tx_packets = adapter->stats.xmitfinished;
	stats->rx_bytes = adapter->stats.rxbytes;
	stats->tx_bytes = adapter->stats.txbytes;
	stats->rx_dropped = adapter->stats.rxdropped;
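	/* txdropped counts frames dropped in the xmit path, e.g. DMA-mapping
	 * or linearization failures in netxen_nic_xmit_frame().
	 */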
	stats->tx_dropped = adapter->stats.txdropped;

	return stats;
}

static irqreturn_t netxen_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;
	struct netxen_adapter *adapter = sds_ring->adapter;
	u32 status = 0;

	status = readl(adapter->isr_int_vec);

	if (!(status & adapter->int_vec_bit))
		return IRQ_NONE;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		/* check interrupt state machine, to be sure */
		status = readl(adapter->crb_int_state_reg);
		if (!ISR_LEGACY_INT_TRIGGERED(status))
			return IRQ_NONE;

	} else {
		unsigned long our_int = 0;

		our_int = readl(adapter->crb_int_state_reg);

		/* not our interrupt */
		if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
			return IRQ_NONE;

		/* claim interrupt */
		writel((our_int & 0xffffffff), adapter->crb_int_state_reg);

		/* clear interrupt */
		netxen_nic_disable_int(sds_ring);
	}

	writel(0xffffffff, adapter->tgt_status_reg);
	/* read twice to ensure write is flushed */
	readl(adapter->isr_int_vec);
	readl(adapter->isr_int_vec);

	napi_schedule(&sds_ring->napi);

	return IRQ_HANDLED;
}

static irqreturn_t netxen_msi_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;
	struct netxen_adapter *adapter = sds_ring->adapter;

	/* clear interrupt */
	writel(0xffffffff, adapter->tgt_status_reg);

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}

static irqreturn_t netxen_msix_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}

static int netxen_nic_poll(struct napi_struct *napi, int budget)
{
	struct nx_host_sds_ring *sds_ring =
		container_of(napi, struct nx_host_sds_ring, napi);

	struct netxen_adapter *adapter = sds_ring->adapter;

	int tx_complete;
	int work_done;

	tx_complete = netxen_process_cmd_ring(adapter);

	work_done = netxen_process_rcv_ring(sds_ring, budget);

	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__NX_DEV_UP, &adapter->state))
			netxen_nic_enable_int(sds_ring);
	}

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	disable_irq(adapter->irq);
	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netxen_intr(adapter->irq, sds_ring);
	}
	enable_irq(adapter->irq);
}
#endif

static int
nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
{
	int count;
	if (netxen_api_lock(adapter))
		return -EIO;

	count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);

	NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);

	netxen_api_unlock(adapter);
	return count;
}

static int
nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
{
	int count, state;
	if (netxen_api_lock(adapter))
		return -EIO;

	count = NXRD32(adapter,
		       NX_CRB_DEV_REF_COUNT);
	WARN_ON(count == 0);

	NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
	state = NXRD32(adapter, NX_CRB_DEV_STATE);

	if (count == 0 && state != NX_DEV_FAILED)
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);

	netxen_api_unlock(adapter);
	return count;
}

static int
nx_dev_request_aer(struct netxen_adapter *adapter)
{
	u32 state;
	int ret = -EINVAL;

	if (netxen_api_lock(adapter))
		return ret;

	state = NXRD32(adapter, NX_CRB_DEV_STATE);

	if (state == NX_DEV_NEED_AER)
		ret = 0;
	else if (state == NX_DEV_READY) {
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
		ret = 0;
	}

	netxen_api_unlock(adapter);
	return ret;
}

int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
	u32 state;
	int ret = -EINVAL;

	if (netxen_api_lock(adapter))
		return ret;

	state = NXRD32(adapter, NX_CRB_DEV_STATE);

	if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
		ret = 0;
	else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
		adapter->flags |= NETXEN_FW_RESET_OWNER;
		ret = 0;
	}

	netxen_api_unlock(adapter);

	return ret;
}

static int
netxen_can_start_firmware(struct netxen_adapter *adapter)
{
	int count;
	int can_start = 0;

	if (netxen_api_lock(adapter)) {
		nx_incr_dev_ref_cnt(adapter);
		return -1;
	}

	count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);

	if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
		count = 0;

	if (count == 0) {
		can_start = 1;
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
	}

	NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);

	netxen_api_unlock(adapter);

	return can_start;
}

static void
netxen_schedule_work(struct netxen_adapter *adapter,
		work_func_t func, int delay)
{
	INIT_DELAYED_WORK(&adapter->fw_work, func);
	schedule_delayed_work(&adapter->fw_work, delay);
}

static void
netxen_cancel_fw_work(struct netxen_adapter *adapter)
{
	while (test_and_set_bit(__NX_RESETTING, &adapter->state))
		msleep(10);

	cancel_delayed_work_sync(&adapter->fw_work);
}

static void
netxen_attach_work(struct work_struct *work)
{
	struct netxen_adapter *adapter = container_of(work,
				struct netxen_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev)) {
		err = netxen_nic_attach(adapter);
		if (err)
			goto done;

		err = netxen_nic_up(adapter, netdev);
		if (err) {
			netxen_nic_detach(adapter);
			goto done;
		}

		netxen_restore_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);

done:
	adapter->fw_fail_cnt = 0;
	clear_bit(__NX_RESETTING, &adapter->state);
	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
}

static void
netxen_fwinit_work(struct work_struct *work)
{
	struct netxen_adapter *adapter = container_of(work,
				struct netxen_adapter, fw_work.work);
	int dev_state;
	int count;
	dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
	if (adapter->flags & NETXEN_FW_RESET_OWNER) {
		count = NXRD32(adapter,
			       NX_CRB_DEV_REF_COUNT);
		WARN_ON(count == 0);
		if (count == 1) {
			if (adapter->mdump.md_enabled) {
				rtnl_lock();
				netxen_dump_fw(adapter);
				rtnl_unlock();
			}
			adapter->flags &= ~NETXEN_FW_RESET_OWNER;
			if (netxen_api_lock(adapter)) {
				clear_bit(__NX_RESETTING, &adapter->state);
				NXWR32(adapter, NX_CRB_DEV_STATE,
				       NX_DEV_FAILED);
				return;
			}
			count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
			NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
			NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
			dev_state = NX_DEV_COLD;
			netxen_api_unlock(adapter);
		}
	}

	switch (dev_state) {
	case NX_DEV_COLD:
	case NX_DEV_READY:
		if (!netxen_start_firmware(adapter)) {
			netxen_schedule_work(adapter, netxen_attach_work, 0);
			return;
		}
		break;

	case NX_DEV_NEED_RESET:
	case NX_DEV_INITALIZING:
		netxen_schedule_work(adapter,
				netxen_fwinit_work, 2 * FW_POLL_DELAY);
		return;

	case NX_DEV_FAILED:
	default:
		nx_incr_dev_ref_cnt(adapter);
		break;
	}

	if (netxen_api_lock(adapter)) {
		clear_bit(__NX_RESETTING, &adapter->state);
		return;
	}
	NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED);
	netxen_api_unlock(adapter);
	dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n",
		adapter->netdev->name);

	clear_bit(__NX_RESETTING, &adapter->state);
}

static void
netxen_detach_work(struct work_struct *work)
{
	struct netxen_adapter *adapter = container_of(work,
				struct netxen_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	int ref_cnt = 0, delay;
	u32 status;

	netif_device_detach(netdev);

	netxen_nic_down(adapter, netdev);

	rtnl_lock();
	netxen_nic_detach(adapter);
	rtnl_unlock();

	status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);

	if (status & NX_RCODE_FATAL_ERROR)
		goto err_ret;

	if (adapter->temp == NX_TEMP_PANIC)
		goto err_ret;

	if (!(adapter->flags & NETXEN_FW_RESET_OWNER))
		ref_cnt = nx_decr_dev_ref_cnt(adapter);

	if (ref_cnt == -EIO)
		goto err_ret;

	delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);

	adapter->fw_wait_cnt = 0;
	netxen_schedule_work(adapter, netxen_fwinit_work, delay);

	return;

err_ret:
	clear_bit(__NX_RESETTING, &adapter->state);
}

static int
netxen_check_health(struct netxen_adapter *adapter)
{
	u32 state, heartbit;
	u32 peg_status;
	struct net_device *netdev = adapter->netdev;

	state = NXRD32(adapter, NX_CRB_DEV_STATE);
	if (state == NX_DEV_NEED_AER)
		return 0;

	if (netxen_nic_check_temp(adapter))
		goto detach;

	if (adapter->need_fw_reset) {
		if (nx_dev_request_reset(adapter))
			return 0;
		goto detach;
	}

	/* NX_DEV_NEED_RESET can be set in two cases:
	 * 1. Tx timeout  2. FW hang
	 * A request to destroy the context is sent only for a Tx timeout;
	 * it is not required for a FW hang.
	 */
	if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
		adapter->need_fw_reset = 1;
		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			goto detach;
	}

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 0;

	heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
	if (heartbit != adapter->heartbit) {
		adapter->heartbit = heartbit;
		adapter->fw_fail_cnt = 0;
		if (adapter->need_fw_reset)
			goto detach;
		return 0;
	}

	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
		return 0;

	if (nx_dev_request_reset(adapter))
		return 0;

	clear_bit(__NX_FW_ATTACHED, &adapter->state);

	dev_err(&netdev->dev, "firmware hang detected\n");
	peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
	dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
		"PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
		"PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
		"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
		"PEG_NET_4_PC: 0x%x\n",
		peg_status,
		NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
	if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
		dev_err(&adapter->pdev->dev,
			"Firmware aborted with error code 0x00006700. "
			"Device is being reset.\n");
detach:
	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
	    !test_and_set_bit(__NX_RESETTING, &adapter->state))
		netxen_schedule_work(adapter, netxen_detach_work, 0);
	return 1;
}

static void
netxen_fw_poll_work(struct work_struct *work)
{
	struct netxen_adapter *adapter = container_of(work,
				struct netxen_adapter, fw_work.work);

	if (test_bit(__NX_RESETTING, &adapter->state))
		goto reschedule;

	if (test_bit(__NX_DEV_UP, &adapter->state) &&
	    !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
		if (!adapter->has_link_events) {

			netxen_nic_handle_phy_intr(adapter);

			if (adapter->link_changed)
				netxen_nic_set_link_parameters(adapter);
		}
	}

	if (netxen_check_health(adapter))
		return;

reschedule:
	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
}

static ssize_t
netxen_store_bridged_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct net_device *net = to_net_dev(dev);
	struct netxen_adapter *adapter = netdev_priv(net);
	unsigned long new;
	int ret = -EINVAL;

	if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
		goto err_out;

	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		goto err_out;

	if (kstrtoul(buf, 2, &new))
		goto err_out;

	if (!netxen_config_bridged_mode(adapter, !!new))
		ret = len;

err_out:
	return ret;
}

static ssize_t
netxen_show_bridged_mode(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	struct netxen_adapter *adapter;
	int bridged_mode = 0;

	adapter = netdev_priv(net);

	if (adapter->capabilities &
	    NX_FW_CAPABILITY_BDG)
		bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);

	return sprintf(buf, "%d\n", bridged_mode);
}

static struct device_attribute dev_attr_bridged_mode = {
	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = netxen_show_bridged_mode,
	.store = netxen_store_bridged_mode,
};

static ssize_t
netxen_store_diag_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct netxen_adapter *adapter = dev_get_drvdata(dev);
	unsigned long new;

	if (kstrtoul(buf, 2, &new))
		return -EINVAL;

	if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
		adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;

	return len;
}

static ssize_t
netxen_show_diag_mode(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct netxen_adapter *adapter = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n",
			!!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
}

static struct device_attribute dev_attr_diag_mode = {
	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = netxen_show_diag_mode,
	.store = netxen_store_diag_mode,
};

static int
netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
		loff_t offset, size_t size)
{
	size_t crb_size = 4;

	if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
		return -EIO;

	if (offset < NETXEN_PCI_CRBSPACE) {
		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			return -EINVAL;

		if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
					NETXEN_PCI_CAMQM_2M_END))
			crb_size = 8;
		else
			return -EINVAL;
	}

	if ((size != crb_size) || (offset & (crb_size-1)))
		return -EINVAL;

	return 0;
}

static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct netxen_adapter *adapter = dev_get_drvdata(dev);
	u32 data;
	u64 qmdata;
	int ret;

	ret = netxen_sysfs_validate_crb(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
	    ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
					NETXEN_PCI_CAMQM_2M_END)) {
		netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
		memcpy(buf, &qmdata, size);
	} else {
		data = NXRD32(adapter, offset);
		memcpy(buf, &data, size);
	}

	return size;
}

static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct netxen_adapter *adapter = dev_get_drvdata(dev);
	u32 data;
	u64 qmdata;
	int ret;

	ret = netxen_sysfs_validate_crb(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
	    ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
					NETXEN_PCI_CAMQM_2M_END)) {
		memcpy(&qmdata, buf, size);
		netxen_pci_camqm_write_2M(adapter, offset, qmdata);
	} else {
		memcpy(&data, buf, size);
		NXWR32(adapter, offset, data);
	}

	return size;
}

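/*
 * netxen_sysfs_validate_mem() - sanity-check a diagnostic memory access.
 * Access is allowed only while diag_mode is enabled, and only as a single
 * 8-byte, 8-byte-aligned transfer.
 */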
static int
netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
		loff_t offset, size_t size)
{
	if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
		return -EIO;

	if ((size != 8) || (offset & 0x7))
		return -EIO;

	return 0;
}

static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct netxen_adapter *adapter = dev_get_drvdata(dev);
	u64 data;
	int ret;

	ret = netxen_sysfs_validate_mem(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (adapter->pci_mem_read(adapter, offset, &data))
		return -EIO;

	memcpy(buf, &data, size);

	return size;
}

static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr, char *buf,
		loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct netxen_adapter *adapter = dev_get_drvdata(dev);
	u64 data;
	int ret;

	ret = netxen_sysfs_validate_mem(adapter, offset, size);
	if (ret != 0)
		return ret;

	memcpy(&data, buf, size);

	if (adapter->pci_mem_write(adapter, offset, data))
		return -EIO;

	return size;
}


static struct bin_attribute bin_attr_crb = {
	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = netxen_sysfs_read_crb,
	.write = netxen_sysfs_write_crb,
};

static struct bin_attribute bin_attr_mem = {
	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = netxen_sysfs_read_mem,
	.write = netxen_sysfs_write_mem,
};

static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct netxen_adapter *adapter = dev_get_drvdata(dev);
	struct net_device *netdev = adapter->netdev;
	struct netxen_dimm_cfg dimm;
	u8 dw, rows, cols, banks, ranks;
	u32 val;

	if (size != sizeof(struct netxen_dimm_cfg)) {
		netdev_err(netdev, "Invalid size\n");
		return -1;
	}

	memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
	val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY);

	/* Checks if DIMM info is valid. */
	if (val & NETXEN_DIMM_VALID_FLAG) {
		netdev_err(netdev, "Invalid DIMM flag\n");
		dimm.presence = 0xff;
		goto out;
	}

	rows = NETXEN_DIMM_NUMROWS(val);
	cols = NETXEN_DIMM_NUMCOLS(val);
	ranks = NETXEN_DIMM_NUMRANKS(val);
	banks = NETXEN_DIMM_NUMBANKS(val);
	dw = NETXEN_DIMM_DATAWIDTH(val);

	dimm.presence = (val & NETXEN_DIMM_PRESENT);

	/* Checks if DIMM info is present.
	 */
	if (!dimm.presence) {
		netdev_err(netdev, "DIMM not present\n");
		goto out;
	}

	dimm.dimm_type = NETXEN_DIMM_TYPE(val);

	switch (dimm.dimm_type) {
	case NETXEN_DIMM_TYPE_RDIMM:
	case NETXEN_DIMM_TYPE_UDIMM:
	case NETXEN_DIMM_TYPE_SO_DIMM:
	case NETXEN_DIMM_TYPE_Micro_DIMM:
	case NETXEN_DIMM_TYPE_Mini_RDIMM:
	case NETXEN_DIMM_TYPE_Mini_UDIMM:
		break;
	default:
		netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type);
		goto out;
	}

	if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM)
		dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM;
	else
		dimm.mem_type = NETXEN_DIMM_MEMTYPE(val);

	if (val & NETXEN_DIMM_SIZE) {
		dimm.size = NETXEN_DIMM_STD_MEM_SIZE;
		goto out;
	}

	if (!rows) {
		netdev_err(netdev, "Invalid no of rows %x\n", rows);
		goto out;
	}

	if (!cols) {
		netdev_err(netdev, "Invalid no of columns %x\n", cols);
		goto out;
	}

	if (!banks) {
		netdev_err(netdev, "Invalid no of banks %x\n", banks);
		goto out;
	}

	ranks += 1;

	switch (dw) {
	case 0x0:
		dw = 32;
		break;
	case 0x1:
		dw = 33;
		break;
	case 0x2:
		dw = 36;
		break;
	case 0x3:
		dw = 64;
		break;
	case 0x4:
		dw = 72;
		break;
	case 0x5:
		dw = 80;
		break;
	case 0x6:
		dw = 128;
		break;
	case 0x7:
		dw = 144;
		break;
	default:
		netdev_err(netdev, "Invalid data-width %x\n", dw);
		goto out;
	}

	dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8;
	/* Size returned in MB. */
	dimm.size = (dimm.size) / 0x100000;
out:
	memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg));
	return sizeof(struct netxen_dimm_cfg);

}

static struct bin_attribute bin_attr_dimm = {
	.attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
	.size = 0,
	.read = netxen_sysfs_read_dimm,
};


static void
netxen_create_sysfs_entries(struct netxen_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
		/* bridged_mode control */
		if (device_create_file(dev, &dev_attr_bridged_mode)) {
			dev_warn(dev,
				"failed to create bridged_mode sysfs entry\n");
		}
	}
}

static void
netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
		device_remove_file(dev, &dev_attr_bridged_mode);
}

static void
netxen_create_diag_entries(struct netxen_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct device *dev;

	dev = &pdev->dev;
	if (device_create_file(dev, &dev_attr_diag_mode))
		dev_info(dev, "failed to create diag_mode sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_crb))
		dev_info(dev, "failed to create crb sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_mem))
		dev_info(dev, "failed to create mem sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_dimm))
		dev_info(dev, "failed to create dimm sysfs entry\n");
}


static void
netxen_remove_diag_entries(struct netxen_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct device *dev = &pdev->dev;

	device_remove_file(dev,
			   &dev_attr_diag_mode);
	device_remove_bin_file(dev, &bin_attr_crb);
	device_remove_bin_file(dev, &bin_attr_mem);
	device_remove_bin_file(dev, &bin_attr_dimm);
}

#ifdef CONFIG_INET

#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)

static int
netxen_destip_supported(struct netxen_adapter *adapter)
{
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 0;

	if (adapter->ahw.cut_through)
		return 0;

	return 1;
}

static void
netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{
	struct nx_ip_list *cur, *tmp_cur;

	list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
		if (master) {
			if (cur->master) {
				netxen_config_ipaddr(adapter, cur->ip_addr,
						     NX_IP_DOWN);
				list_del(&cur->list);
				kfree(cur);
			}
		} else {
			netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
			list_del(&cur->list);
			kfree(cur);
		}
	}
}

static bool
netxen_list_config_ip(struct netxen_adapter *adapter,
		struct in_ifaddr *ifa, unsigned long event)
{
	struct net_device *dev;
	struct nx_ip_list *cur, *tmp_cur;
	struct list_head *head;
	bool ret = false;

	dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;

	if (dev == NULL)
		goto out;

	switch (event) {
	case NX_IP_UP:
		list_for_each(head, &adapter->ip_list) {
			cur = list_entry(head, struct nx_ip_list, list);

			if (cur->ip_addr == ifa->ifa_address)
				goto out;
		}

		cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
		if (cur == NULL)
			goto out;
		if (dev->priv_flags & IFF_802_1Q_VLAN)
			dev = vlan_dev_real_dev(dev);
		cur->master = !!netif_is_bond_master(dev);
		cur->ip_addr = ifa->ifa_address;
		list_add_tail(&cur->list, &adapter->ip_list);
		netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
		ret = true;
		break;
	case NX_IP_DOWN:
		list_for_each_entry_safe(cur, tmp_cur,
					&adapter->ip_list, list) {
			if (cur->ip_addr == ifa->ifa_address) {
				list_del(&cur->list);
				kfree(cur);
				netxen_config_ipaddr(adapter, ifa->ifa_address,
						     NX_IP_DOWN);
				ret = true;
				break;
			}
		}
	}
out:
	return ret;
}

static void
netxen_config_indev_addr(struct netxen_adapter *adapter,
		struct net_device *dev, unsigned long event)
{
	struct in_device *indev;

	if (!netxen_destip_supported(adapter))
		return;

	indev = in_dev_get(dev);
	if (!indev)
		return;

	for_ifa(indev) {
		switch (event) {
		case NETDEV_UP:
			netxen_list_config_ip(adapter, ifa, NX_IP_UP);
			break;
		case NETDEV_DOWN:
			netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
			break;
		default:
			break;
		}
	} endfor_ifa(indev);

	in_dev_put(indev);
}

static void
netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct nx_ip_list *pos, *tmp_pos;
	unsigned long ip_event;

	ip_event = (event == NETDEV_UP) ?
		NX_IP_UP : NX_IP_DOWN;
	netxen_config_indev_addr(adapter, netdev, event);

	list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
		netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
	}
}

static inline bool
netxen_config_checkdev(struct net_device *dev)
{
	struct netxen_adapter *adapter;

	if (!is_netxen_netdev(dev))
		return false;
	adapter = netdev_priv(dev);
	if (!adapter)
		return false;
	if (!netxen_destip_supported(adapter))
		return false;
	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return false;

	return true;
}

/**
 * netxen_config_master - configure addresses based on master
 * @dev: netxen device
 * @event: netdev event
 */
static void netxen_config_master(struct net_device *dev, unsigned long event)
{
	struct net_device *master, *slave;
	struct netxen_adapter *adapter = netdev_priv(dev);

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	/*
	 * This is the case where the netxen nic is being
	 * enslaved and is dev_open()ed in bond_enslave()
	 * Now we should program the bond's (and its vlans')
	 * addresses in the netxen NIC.
	 */
	if (master && netif_is_bond_master(master) &&
	    !netif_is_bond_slave(dev)) {
		netxen_config_indev_addr(adapter, master, event);
		for_each_netdev_rcu(&init_net, slave)
			if (slave->priv_flags & IFF_802_1Q_VLAN &&
			    vlan_dev_real_dev(slave) == master)
				netxen_config_indev_addr(adapter, slave, event);
	}
	rcu_read_unlock();
	/*
	 * This is the case where the netxen nic is being
	 * released and is dev_close()ed in bond_release()
	 * just before IFF_BONDING is stripped.
	 */
	if (!master && dev->priv_flags & IFF_BONDING)
		netxen_free_ip_list(adapter, true);
}

static int netxen_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct netxen_adapter *adapter;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *orig_dev = dev;
	struct net_device *slave;

recheck:
	if (dev == NULL)
		goto done;

	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}
	if (event == NETDEV_UP || event == NETDEV_DOWN) {
		/* If this is a bonding device, look for netxen-based slaves */
		if (netif_is_bond_master(dev)) {
			rcu_read_lock();
			for_each_netdev_in_bond_rcu(dev, slave) {
				if (!netxen_config_checkdev(slave))
					continue;
				adapter = netdev_priv(slave);
				netxen_config_indev_addr(adapter,
						orig_dev, event);
			}
			rcu_read_unlock();
		} else {
			if (!netxen_config_checkdev(dev))
				goto done;
			adapter = netdev_priv(dev);
			/* Act only if the actual netxen is the target */
			if (orig_dev == dev)
				netxen_config_master(dev, event);
			netxen_config_indev_addr(adapter, orig_dev, event);
		}
	}
done:
	return NOTIFY_DONE;
}

static int
netxen_inetaddr_event(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	struct netxen_adapter *adapter;
	struct net_device *dev, *slave;
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	unsigned long ip_event;

	dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
	ip_event = (event == NETDEV_UP) ?
		NX_IP_UP : NX_IP_DOWN;
recheck:
	if (dev == NULL)
		goto done;

	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}
	if (event == NETDEV_UP || event == NETDEV_DOWN) {
		/* If this is a bonding device, look for netxen-based slaves */
		if (netif_is_bond_master(dev)) {
			rcu_read_lock();
			for_each_netdev_in_bond_rcu(dev, slave) {
				if (!netxen_config_checkdev(slave))
					continue;
				adapter = netdev_priv(slave);
				netxen_list_config_ip(adapter, ifa, ip_event);
			}
			rcu_read_unlock();
		} else {
			if (!netxen_config_checkdev(dev))
				goto done;
			adapter = netdev_priv(dev);
			netxen_list_config_ip(adapter, ifa, ip_event);
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block netxen_netdev_cb = {
	.notifier_call = netxen_netdev_event,
};

static struct notifier_block netxen_inetaddr_cb = {
	.notifier_call = netxen_inetaddr_event,
};
#else
static void
netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
static void
netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{ }
#endif

static const struct pci_error_handlers netxen_err_handler = {
	.error_detected = netxen_io_error_detected,
	.slot_reset = netxen_io_slot_reset,
	.resume = netxen_io_resume,
};

static struct pci_driver netxen_driver = {
	.name = netxen_nic_driver_name,
	.id_table = netxen_pci_tbl,
	.probe = netxen_nic_probe,
	.remove = netxen_nic_remove,
#ifdef CONFIG_PM
	.suspend = netxen_nic_suspend,
	.resume = netxen_nic_resume,
#endif
	.shutdown = netxen_nic_shutdown,
	.err_handler = &netxen_err_handler
};

static int __init netxen_init_module(void)
{
	printk(KERN_INFO "%s\n", netxen_nic_driver_string);

#ifdef CONFIG_INET
	register_netdevice_notifier(&netxen_netdev_cb);
	register_inetaddr_notifier(&netxen_inetaddr_cb);
#endif
	return pci_register_driver(&netxen_driver);
}

module_init(netxen_init_module);

static void __exit netxen_exit_module(void)
{
	pci_unregister_driver(&netxen_driver);

#ifdef CONFIG_INET
	unregister_inetaddr_notifier(&netxen_inetaddr_cb);
	unregister_netdevice_notifier(&netxen_netdev_cb);
#endif
}

module_exit(netxen_exit_module);