/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "netxen_nic_hw.h"

#include "netxen_nic.h"

#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>

MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);

char netxen_nic_driver_name[] = "netxen_nic";
static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
		NETXEN_NIC_LINUX_VERSIONID;

static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;

/* Default to restricted 1G auto-neg mode */
static int wol_port_mode = 5;

static int use_msi = 1;

static int use_msi_x = 1;

static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int netxen_nic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void netxen_nic_remove(struct pci_dev *pdev);
static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
		struct net_device *);
static void netxen_tx_timeout(struct net_device *netdev);
static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_fw_poll_work(struct work_struct *work);
static void netxen_schedule_work(struct netxen_adapter *adapter,
		work_func_t func, int delay);
static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
static int netxen_nic_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev);
#endif

static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);

static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);

static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
		struct rtnl_link_stats64 *stats);
static int netxen_nic_set_mac(struct net_device *netdev, void *p);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

static const struct pci_device_id netxen_pci_tbl[] = {
	ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
	ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
	ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
	ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
	ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
	ENTRY(PCI_DEVICE_ID_NX3031),
	{0,}
};

MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);

static uint32_t crb_cmd_producer[4] = {
	CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
	CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
};

void
netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
		struct nx_host_tx_ring *tx_ring)
{
	NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
}

static uint32_t crb_cmd_consumer[4] = {
	CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
	CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
};

static inline void
netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
		struct nx_host_tx_ring *tx_ring)
{
	NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
}

static uint32_t msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;

static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
{
	struct netxen_adapter *adapter = sds_ring->adapter;

	NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
}

static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
{
	struct netxen_adapter *adapter = sds_ring->adapter;

	NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);

	if (!NETXEN_IS_MSI_FAMILY(adapter))
		NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
}

static int
netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct nx_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
{
	kfree(recv_ctx->sds_rings);
	recv_ctx->sds_rings = NULL;
}

static int
netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_add(netdev, &sds_ring->napi,
netxen_nic_poll, NAPI_POLL_WEIGHT); 199 } 200 201 return 0; 202 } 203 204 static void 205 netxen_napi_del(struct netxen_adapter *adapter) 206 { 207 int ring; 208 struct nx_host_sds_ring *sds_ring; 209 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 210 211 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 212 sds_ring = &recv_ctx->sds_rings[ring]; 213 netif_napi_del(&sds_ring->napi); 214 } 215 216 netxen_free_sds_rings(&adapter->recv_ctx); 217 } 218 219 static void 220 netxen_napi_enable(struct netxen_adapter *adapter) 221 { 222 int ring; 223 struct nx_host_sds_ring *sds_ring; 224 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 225 226 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 227 sds_ring = &recv_ctx->sds_rings[ring]; 228 napi_enable(&sds_ring->napi); 229 netxen_nic_enable_int(sds_ring); 230 } 231 } 232 233 static void 234 netxen_napi_disable(struct netxen_adapter *adapter) 235 { 236 int ring; 237 struct nx_host_sds_ring *sds_ring; 238 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 239 240 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 241 sds_ring = &recv_ctx->sds_rings[ring]; 242 netxen_nic_disable_int(sds_ring); 243 napi_synchronize(&sds_ring->napi); 244 napi_disable(&sds_ring->napi); 245 } 246 } 247 248 static int nx_set_dma_mask(struct netxen_adapter *adapter) 249 { 250 struct pci_dev *pdev = adapter->pdev; 251 uint64_t mask, cmask; 252 253 adapter->pci_using_dac = 0; 254 255 mask = DMA_BIT_MASK(32); 256 cmask = DMA_BIT_MASK(32); 257 258 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 259 #ifndef CONFIG_IA64 260 mask = DMA_BIT_MASK(35); 261 #endif 262 } else { 263 mask = DMA_BIT_MASK(39); 264 cmask = mask; 265 } 266 267 if (pci_set_dma_mask(pdev, mask) == 0 && 268 pci_set_consistent_dma_mask(pdev, cmask) == 0) { 269 adapter->pci_using_dac = 1; 270 return 0; 271 } 272 273 return -EIO; 274 } 275 276 /* Update addressable range if firmware supports it */ 277 static int 278 nx_update_dma_mask(struct netxen_adapter *adapter) 279 { 280 int change, shift, err; 281 uint64_t mask, old_mask, old_cmask; 282 struct pci_dev *pdev = adapter->pdev; 283 284 change = 0; 285 286 shift = NXRD32(adapter, CRB_DMA_SHIFT); 287 if (shift > 32) 288 return 0; 289 290 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) 291 change = 1; 292 else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) 293 change = 1; 294 295 if (change) { 296 old_mask = pdev->dma_mask; 297 old_cmask = pdev->dev.coherent_dma_mask; 298 299 mask = DMA_BIT_MASK(32+shift); 300 301 err = pci_set_dma_mask(pdev, mask); 302 if (err) 303 goto err_out; 304 305 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 306 307 err = pci_set_consistent_dma_mask(pdev, mask); 308 if (err) 309 goto err_out; 310 } 311 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); 312 } 313 314 return 0; 315 316 err_out: 317 pci_set_dma_mask(pdev, old_mask); 318 pci_set_consistent_dma_mask(pdev, old_cmask); 319 return err; 320 } 321 322 static int 323 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) 324 { 325 u32 val, timeout; 326 327 if (first_boot == 0x55555555) { 328 /* This is the first boot after power up */ 329 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); 330 331 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) 332 return 0; 333 334 /* PCI bus master workaround */ 335 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); 336 if (!(first_boot & 0x4)) { 337 first_boot |= 0x4; 338 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); 339 
NXRD32(adapter, NETXEN_PCIE_REG(0x4)); 340 } 341 342 /* This is the first boot after power up */ 343 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); 344 if (first_boot != 0x80000f) { 345 /* clear the register for future unloads/loads */ 346 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); 347 return -EIO; 348 } 349 350 /* Start P2 boot loader */ 351 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); 352 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); 353 timeout = 0; 354 do { 355 msleep(1); 356 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); 357 358 if (++timeout > 5000) 359 return -EIO; 360 361 } while (val == NETXEN_BDINFO_MAGIC); 362 } 363 return 0; 364 } 365 366 static void netxen_set_port_mode(struct netxen_adapter *adapter) 367 { 368 u32 val, data; 369 370 val = adapter->ahw.board_type; 371 if ((val == NETXEN_BRDTYPE_P3_HMEZ) || 372 (val == NETXEN_BRDTYPE_P3_XG_LOM)) { 373 if (port_mode == NETXEN_PORT_MODE_802_3_AP) { 374 data = NETXEN_PORT_MODE_802_3_AP; 375 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); 376 } else if (port_mode == NETXEN_PORT_MODE_XG) { 377 data = NETXEN_PORT_MODE_XG; 378 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); 379 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { 380 data = NETXEN_PORT_MODE_AUTO_NEG_1G; 381 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); 382 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { 383 data = NETXEN_PORT_MODE_AUTO_NEG_XG; 384 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); 385 } else { 386 data = NETXEN_PORT_MODE_AUTO_NEG; 387 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); 388 } 389 390 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && 391 (wol_port_mode != NETXEN_PORT_MODE_XG) && 392 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && 393 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { 394 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; 395 } 396 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); 397 } 398 } 399 400 #define PCI_CAP_ID_GEN 0x10 401 402 static void netxen_pcie_strap_init(struct netxen_adapter *adapter) 403 { 404 u32 pdevfuncsave; 405 u32 c8c9value = 0; 406 u32 chicken = 0; 407 u32 control = 0; 408 int i, pos; 409 struct pci_dev *pdev; 410 411 pdev = adapter->pdev; 412 413 chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3)); 414 /* clear chicken3.25:24 */ 415 chicken &= 0xFCFFFFFF; 416 /* 417 * if gen1 and B0, set F1020 - if gen 2, do nothing 418 * if gen2 set to F1000 419 */ 420 pos = pci_find_capability(pdev, PCI_CAP_ID_GEN); 421 if (pos == 0xC0) { 422 pci_read_config_dword(pdev, pos + 0x10, &control); 423 if ((control & 0x000F0000) != 0x00020000) { 424 /* set chicken3.24 if gen1 */ 425 chicken |= 0x01000000; 426 } 427 dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n"); 428 c8c9value = 0xF1000; 429 } else { 430 /* set chicken3.24 if gen1 */ 431 chicken |= 0x01000000; 432 dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n"); 433 if (adapter->ahw.revision_id == NX_P3_B0) 434 c8c9value = 0xF1020; 435 else 436 c8c9value = 0; 437 } 438 439 NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken); 440 441 if (!c8c9value) 442 return; 443 444 pdevfuncsave = pdev->devfn; 445 if (pdevfuncsave & 0x07) 446 return; 447 448 for (i = 0; i < 8; i++) { 449 pci_read_config_dword(pdev, pos + 8, &control); 450 pci_read_config_dword(pdev, pos + 8, &control); 451 pci_write_config_dword(pdev, pos + 8, c8c9value); 452 pdev->devfn++; 453 } 454 pdev->devfn = pdevfuncsave; 455 } 456 457 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) 458 { 459 u32 control; 460 461 if 
(pdev->msix_cap) { 462 pci_read_config_dword(pdev, pdev->msix_cap, &control); 463 if (enable) 464 control |= PCI_MSIX_FLAGS_ENABLE; 465 else 466 control = 0; 467 pci_write_config_dword(pdev, pdev->msix_cap, control); 468 } 469 } 470 471 static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) 472 { 473 int i; 474 475 for (i = 0; i < count; i++) 476 adapter->msix_entries[i].entry = i; 477 } 478 479 static int 480 netxen_read_mac_addr(struct netxen_adapter *adapter) 481 { 482 int i; 483 unsigned char *p; 484 u64 mac_addr; 485 struct net_device *netdev = adapter->netdev; 486 struct pci_dev *pdev = adapter->pdev; 487 488 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 489 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) 490 return -EIO; 491 } else { 492 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) 493 return -EIO; 494 } 495 496 p = (unsigned char *)&mac_addr; 497 for (i = 0; i < 6; i++) 498 netdev->dev_addr[i] = *(p + 5 - i); 499 500 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); 501 502 /* set station address */ 503 504 if (!is_valid_ether_addr(netdev->dev_addr)) 505 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); 506 507 return 0; 508 } 509 510 static int netxen_nic_set_mac(struct net_device *netdev, void *p) 511 { 512 struct netxen_adapter *adapter = netdev_priv(netdev); 513 struct sockaddr *addr = p; 514 515 if (!is_valid_ether_addr(addr->sa_data)) 516 return -EADDRNOTAVAIL; 517 518 if (netif_running(netdev)) { 519 netif_device_detach(netdev); 520 netxen_napi_disable(adapter); 521 } 522 523 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); 524 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 525 adapter->macaddr_set(adapter, addr->sa_data); 526 527 if (netif_running(netdev)) { 528 netif_device_attach(netdev); 529 netxen_napi_enable(adapter); 530 } 531 return 0; 532 } 533 534 static void netxen_set_multicast_list(struct net_device *dev) 535 { 536 struct netxen_adapter *adapter = netdev_priv(dev); 537 538 adapter->set_multi(dev); 539 } 540 541 static netdev_features_t netxen_fix_features(struct net_device *dev, 542 netdev_features_t features) 543 { 544 if (!(features & NETIF_F_RXCSUM)) { 545 netdev_info(dev, "disabling LRO as RXCSUM is off\n"); 546 547 features &= ~NETIF_F_LRO; 548 } 549 550 return features; 551 } 552 553 static int netxen_set_features(struct net_device *dev, 554 netdev_features_t features) 555 { 556 struct netxen_adapter *adapter = netdev_priv(dev); 557 int hw_lro; 558 559 if (!((dev->features ^ features) & NETIF_F_LRO)) 560 return 0; 561 562 hw_lro = (features & NETIF_F_LRO) ? 
NETXEN_NIC_LRO_ENABLED 563 : NETXEN_NIC_LRO_DISABLED; 564 565 if (netxen_config_hw_lro(adapter, hw_lro)) 566 return -EIO; 567 568 if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) 569 return -EIO; 570 571 return 0; 572 } 573 574 static const struct net_device_ops netxen_netdev_ops = { 575 .ndo_open = netxen_nic_open, 576 .ndo_stop = netxen_nic_close, 577 .ndo_start_xmit = netxen_nic_xmit_frame, 578 .ndo_get_stats64 = netxen_nic_get_stats, 579 .ndo_validate_addr = eth_validate_addr, 580 .ndo_set_rx_mode = netxen_set_multicast_list, 581 .ndo_set_mac_address = netxen_nic_set_mac, 582 .ndo_change_mtu = netxen_nic_change_mtu, 583 .ndo_tx_timeout = netxen_tx_timeout, 584 .ndo_fix_features = netxen_fix_features, 585 .ndo_set_features = netxen_set_features, 586 #ifdef CONFIG_NET_POLL_CONTROLLER 587 .ndo_poll_controller = netxen_nic_poll_controller, 588 #endif 589 }; 590 591 static inline bool netxen_function_zero(struct pci_dev *pdev) 592 { 593 return (PCI_FUNC(pdev->devfn) == 0) ? true : false; 594 } 595 596 static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, 597 u32 mode) 598 { 599 NXWR32(adapter, NETXEN_INTR_MODE_REG, mode); 600 } 601 602 static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter) 603 { 604 return NXRD32(adapter, NETXEN_INTR_MODE_REG); 605 } 606 607 static void 608 netxen_initialize_interrupt_registers(struct netxen_adapter *adapter) 609 { 610 struct netxen_legacy_intr_set *legacy_intrp; 611 u32 tgt_status_reg, int_state_reg; 612 613 if (adapter->ahw.revision_id >= NX_P3_B0) 614 legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; 615 else 616 legacy_intrp = &legacy_intr[0]; 617 618 tgt_status_reg = legacy_intrp->tgt_status_reg; 619 int_state_reg = ISR_INT_STATE_REG; 620 621 adapter->int_vec_bit = legacy_intrp->int_vec_bit; 622 adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg); 623 adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, 624 legacy_intrp->tgt_mask_reg); 625 adapter->pci_int_reg = netxen_get_ioaddr(adapter, 626 legacy_intrp->pci_int_reg); 627 adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); 628 629 if (adapter->ahw.revision_id >= NX_P3_B1) 630 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, 631 int_state_reg); 632 else 633 adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, 634 CRB_INT_VECTOR); 635 } 636 637 static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, 638 int num_msix) 639 { 640 struct pci_dev *pdev = adapter->pdev; 641 u32 value; 642 int err; 643 644 if (adapter->msix_supported) { 645 netxen_init_msix_entries(adapter, num_msix); 646 err = pci_enable_msix_range(pdev, adapter->msix_entries, 647 num_msix, num_msix); 648 if (err > 0) { 649 adapter->flags |= NETXEN_NIC_MSIX_ENABLED; 650 netxen_set_msix_bit(pdev, 1); 651 652 if (adapter->rss_supported) 653 adapter->max_sds_rings = num_msix; 654 655 dev_info(&pdev->dev, "using msi-x interrupts\n"); 656 return 0; 657 } 658 /* fall through for msi */ 659 } 660 661 if (use_msi && !pci_enable_msi(pdev)) { 662 value = msi_tgt_status[adapter->ahw.pci_func]; 663 adapter->flags |= NETXEN_NIC_MSI_ENABLED; 664 adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value); 665 adapter->msix_entries[0].vector = pdev->irq; 666 dev_info(&pdev->dev, "using msi interrupts\n"); 667 return 0; 668 } 669 670 dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n"); 671 return -EIO; 672 } 673 674 static int netxen_setup_intr(struct netxen_adapter *adapter) 675 { 676 struct pci_dev *pdev = adapter->pdev; 
677 int num_msix; 678 679 if (adapter->rss_supported) 680 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? 681 MSIX_ENTRIES_PER_ADAPTER : 2; 682 else 683 num_msix = 1; 684 685 adapter->max_sds_rings = 1; 686 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); 687 688 netxen_initialize_interrupt_registers(adapter); 689 netxen_set_msix_bit(pdev, 0); 690 691 if (netxen_function_zero(pdev)) { 692 if (!netxen_setup_msi_interrupts(adapter, num_msix)) 693 netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); 694 else 695 netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE); 696 } else { 697 if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE && 698 netxen_setup_msi_interrupts(adapter, num_msix)) { 699 dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n"); 700 return -EIO; 701 } 702 } 703 704 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 705 adapter->msix_entries[0].vector = pdev->irq; 706 dev_info(&pdev->dev, "using legacy interrupts\n"); 707 } 708 return 0; 709 } 710 711 static void 712 netxen_teardown_intr(struct netxen_adapter *adapter) 713 { 714 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) 715 pci_disable_msix(adapter->pdev); 716 if (adapter->flags & NETXEN_NIC_MSI_ENABLED) 717 pci_disable_msi(adapter->pdev); 718 } 719 720 static void 721 netxen_cleanup_pci_map(struct netxen_adapter *adapter) 722 { 723 if (adapter->ahw.db_base != NULL) 724 iounmap(adapter->ahw.db_base); 725 if (adapter->ahw.pci_base0 != NULL) 726 iounmap(adapter->ahw.pci_base0); 727 if (adapter->ahw.pci_base1 != NULL) 728 iounmap(adapter->ahw.pci_base1); 729 if (adapter->ahw.pci_base2 != NULL) 730 iounmap(adapter->ahw.pci_base2); 731 } 732 733 static int 734 netxen_setup_pci_map(struct netxen_adapter *adapter) 735 { 736 void __iomem *db_ptr = NULL; 737 738 resource_size_t mem_base, db_base; 739 unsigned long mem_len, db_len = 0; 740 741 struct pci_dev *pdev = adapter->pdev; 742 int pci_func = adapter->ahw.pci_func; 743 struct netxen_hardware_context *ahw = &adapter->ahw; 744 745 int err = 0; 746 747 /* 748 * Set the CRB window to invalid. If any register in window 0 is 749 * accessed it should set the window to 0 and then reset it to 1. 
750 */ 751 adapter->ahw.crb_win = -1; 752 adapter->ahw.ocm_win = -1; 753 754 /* remap phys address */ 755 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 756 mem_len = pci_resource_len(pdev, 0); 757 758 /* 128 Meg of memory */ 759 if (mem_len == NETXEN_PCI_128MB_SIZE) { 760 761 ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); 762 ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, 763 SECOND_PAGE_GROUP_SIZE); 764 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, 765 THIRD_PAGE_GROUP_SIZE); 766 if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || 767 ahw->pci_base2 == NULL) { 768 dev_err(&pdev->dev, "failed to map PCI bar 0\n"); 769 err = -EIO; 770 goto err_out; 771 } 772 773 ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; 774 775 } else if (mem_len == NETXEN_PCI_32MB_SIZE) { 776 777 ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); 778 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - 779 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); 780 if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { 781 dev_err(&pdev->dev, "failed to map PCI bar 0\n"); 782 err = -EIO; 783 goto err_out; 784 } 785 786 } else if (mem_len == NETXEN_PCI_2MB_SIZE) { 787 788 ahw->pci_base0 = pci_ioremap_bar(pdev, 0); 789 if (ahw->pci_base0 == NULL) { 790 dev_err(&pdev->dev, "failed to map PCI bar 0\n"); 791 return -EIO; 792 } 793 ahw->pci_len0 = mem_len; 794 } else { 795 return -EIO; 796 } 797 798 netxen_setup_hwops(adapter); 799 800 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 801 802 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { 803 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, 804 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); 805 806 } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 807 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, 808 NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); 809 } 810 811 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 812 goto skip_doorbell; 813 814 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 815 db_len = pci_resource_len(pdev, 4); 816 817 if (db_len == 0) { 818 printk(KERN_ERR "%s: doorbell is disabled\n", 819 netxen_nic_driver_name); 820 err = -EIO; 821 goto err_out; 822 } 823 824 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); 825 if (!db_ptr) { 826 printk(KERN_ERR "%s: Failed to allocate doorbell map.", 827 netxen_nic_driver_name); 828 err = -EIO; 829 goto err_out; 830 } 831 832 skip_doorbell: 833 adapter->ahw.db_base = db_ptr; 834 adapter->ahw.db_len = db_len; 835 return 0; 836 837 err_out: 838 netxen_cleanup_pci_map(adapter); 839 return err; 840 } 841 842 static void 843 netxen_check_options(struct netxen_adapter *adapter) 844 { 845 u32 fw_major, fw_minor, fw_build, prev_fw_version; 846 char brd_name[NETXEN_MAX_SHORT_NAME]; 847 char serial_num[32]; 848 int i, offset, val, err; 849 __le32 *ptr32; 850 struct pci_dev *pdev = adapter->pdev; 851 852 adapter->driver_mismatch = 0; 853 854 ptr32 = (__le32 *)&serial_num; 855 offset = NX_FW_SERIAL_NUM_OFFSET; 856 for (i = 0; i < 8; i++) { 857 if (netxen_rom_fast_read(adapter, offset, &val) == -1) { 858 dev_err(&pdev->dev, "error reading board info\n"); 859 adapter->driver_mismatch = 1; 860 return; 861 } 862 ptr32[i] = cpu_to_le32(val); 863 offset += sizeof(u32); 864 } 865 866 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); 867 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); 868 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); 869 prev_fw_version = adapter->fw_version; 870 adapter->fw_version 
= NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);

	/* Get FW Mini Coredump template and store it */
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (adapter->mdump.md_template == NULL ||
				adapter->fw_version > prev_fw_version) {
			kfree(adapter->mdump.md_template);
			adapter->mdump.md_template = NULL;
			err = netxen_setup_minidump(adapter);
			if (err)
				dev_err(&adapter->pdev->dev,
					"Failed to setup minidump, rcode = %d\n", err);
		}
	}

	if (adapter->portnum == 0) {
		if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
						    brd_name))
			strcpy(brd_name, "Unknown");

		pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, serial_num, adapter->ahw.revision_id);
	}

	if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
		adapter->driver_mismatch = 1;
		dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
				fw_major, fw_minor, fw_build);
		return;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		i = NXRD32(adapter, NETXEN_SRE_MISC);
		adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
	}

	dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
			NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
			adapter->ahw.cut_through ? "cut-through" : "legacy");

	if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
		adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);

	if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	} else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = 0;
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		adapter->msix_supported = !!use_msi_x;
		adapter->rss_supported = !!use_msi_x;
	} else {
		u32 flashed_ver = 0;
		netxen_rom_fast_read(adapter,
				NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
		flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);

		if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
			switch (adapter->ahw.board_type) {
			case NETXEN_BRDTYPE_P2_SB31_10G:
			case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
				adapter->msix_supported = !!use_msi_x;
				adapter->rss_supported = !!use_msi_x;
				break;
			default:
				break;
			}
		}
	}

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
		adapter->max_rds_rings = 3;
	} else {
		adapter->num_lro_rxd = 0;
		adapter->max_rds_rings = 2;
	}
}

static int
netxen_start_firmware(struct netxen_adapter *adapter)
{
	int val, err, first_boot;
	struct pci_dev *pdev = adapter->pdev;

	/* required for NX2031 dummy dma */
	err = nx_set_dma_mask(adapter);
	if (err)
		return err;

	err = netxen_can_start_firmware(adapter);

	if (err < 0)
		return err;

	if (!err)
		goto wait_init;

	first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));

	err = netxen_check_hw_init(adapter, first_boot);
	if (err) {
		dev_err(&pdev->dev, "error in HW init sequence\n");
		return err;
	}

	netxen_request_firmware(adapter);

	err = netxen_need_fw_reset(adapter);
	if (err < 0)
		goto err_out;
	if (err == 0)
		goto pcie_strap_init;

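	/*
	 * Not the first boot after power-up: the chip has already been
	 * running, so re-run the ROM init sequence to bring it back to a
	 * clean state before (re)loading firmware.
	 */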
	if (first_boot != 0x55555555) {
		NXWR32(adapter, CRB_CMDPEG_STATE, 0);
		netxen_pinit_from_rom(adapter);
		msleep(1);
	}

	NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
	NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
	NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_set_port_mode(adapter);

	err = netxen_load_firmware(adapter);
	if (err)
		goto err_out;

	netxen_release_firmware(adapter);

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {

		/* Initialize multicast addr pool owners */
		val = 0x7654;
		if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
			val |= 0x0f000000;
		NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);

	}

	err = netxen_init_dummy_dma(adapter);
	if (err)
		goto err_out;

	/*
	 * Tell the hardware our version number.
	 */
	val = (_NETXEN_NIC_LINUX_MAJOR << 16)
		| (_NETXEN_NIC_LINUX_MINOR << 8)
		| (_NETXEN_NIC_LINUX_SUBVERSION);
	NXWR32(adapter, CRB_DRIVER_VERSION, val);

pcie_strap_init:
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_pcie_strap_init(adapter);

wait_init:
	/* Handshake with the card before we register the devices. */
	err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
	if (err) {
		netxen_free_dummy_dma(adapter);
		goto err_out;
	}

	NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);

	nx_update_dma_mask(adapter);

	netxen_check_options(adapter);

	adapter->need_fw_reset = 0;

	/* fall through and release firmware */

err_out:
	netxen_release_firmware(adapter);
	return err;
}

static int
netxen_nic_request_irq(struct netxen_adapter *adapter)
{
	irq_handler_t handler;
	struct nx_host_sds_ring *sds_ring;
	int err, ring;

	unsigned long flags = 0;
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
		handler = netxen_msix_intr;
	else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
		handler = netxen_msi_intr;
	else {
		flags |= IRQF_SHARED;
		handler = netxen_intr;
	}
	adapter->irq = netdev->irq;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
		err = request_irq(sds_ring->irq, handler,
				  flags, sds_ring->name, sds_ring);
		if (err)
			return err;
	}

	return 0;
}

static void
netxen_nic_free_irq(struct netxen_adapter *adapter)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		free_irq(sds_ring->irq, sds_ring);
	}
}

static void
netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
{
	adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
	adapter->coal.normal.data.rx_time_us =
		NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
	adapter->coal.normal.data.rx_packets =
		NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
	adapter->coal.normal.data.tx_time_us =
		NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
	adapter->coal.normal.data.tx_packets =
NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; 1118 } 1119 1120 /* with rtnl_lock */ 1121 static int 1122 __netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) 1123 { 1124 int err; 1125 1126 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 1127 return -EIO; 1128 1129 err = adapter->init_port(adapter, adapter->physical_port); 1130 if (err) { 1131 printk(KERN_ERR "%s: Failed to initialize port %d\n", 1132 netxen_nic_driver_name, adapter->portnum); 1133 return err; 1134 } 1135 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 1136 adapter->macaddr_set(adapter, adapter->mac_addr); 1137 1138 adapter->set_multi(netdev); 1139 adapter->set_mtu(adapter, netdev->mtu); 1140 1141 adapter->ahw.linkup = 0; 1142 1143 if (adapter->max_sds_rings > 1) 1144 netxen_config_rss(adapter, 1); 1145 1146 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1147 netxen_config_intr_coalesce(adapter); 1148 1149 if (netdev->features & NETIF_F_LRO) 1150 netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); 1151 1152 netxen_napi_enable(adapter); 1153 1154 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) 1155 netxen_linkevent_request(adapter, 1); 1156 else 1157 netxen_nic_set_link_parameters(adapter); 1158 1159 set_bit(__NX_DEV_UP, &adapter->state); 1160 return 0; 1161 } 1162 1163 /* Usage: During resume and firmware recovery module.*/ 1164 1165 static inline int 1166 netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) 1167 { 1168 int err = 0; 1169 1170 rtnl_lock(); 1171 if (netif_running(netdev)) 1172 err = __netxen_nic_up(adapter, netdev); 1173 rtnl_unlock(); 1174 1175 return err; 1176 } 1177 1178 /* with rtnl_lock */ 1179 static void 1180 __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) 1181 { 1182 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 1183 return; 1184 1185 if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state)) 1186 return; 1187 1188 smp_mb(); 1189 netif_carrier_off(netdev); 1190 netif_tx_disable(netdev); 1191 1192 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) 1193 netxen_linkevent_request(adapter, 0); 1194 1195 if (adapter->stop_port) 1196 adapter->stop_port(adapter); 1197 1198 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1199 netxen_p3_free_mac_list(adapter); 1200 1201 adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); 1202 1203 netxen_napi_disable(adapter); 1204 1205 netxen_release_tx_buffers(adapter); 1206 } 1207 1208 /* Usage: During suspend and firmware recovery module */ 1209 1210 static inline void 1211 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) 1212 { 1213 rtnl_lock(); 1214 if (netif_running(netdev)) 1215 __netxen_nic_down(adapter, netdev); 1216 rtnl_unlock(); 1217 1218 } 1219 1220 static int 1221 netxen_nic_attach(struct netxen_adapter *adapter) 1222 { 1223 struct net_device *netdev = adapter->netdev; 1224 struct pci_dev *pdev = adapter->pdev; 1225 int err, ring; 1226 struct nx_host_rds_ring *rds_ring; 1227 struct nx_host_tx_ring *tx_ring; 1228 u32 capab2; 1229 1230 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) 1231 return 0; 1232 1233 err = netxen_init_firmware(adapter); 1234 if (err) 1235 return err; 1236 1237 adapter->flags &= ~NETXEN_FW_MSS_CAP; 1238 if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) { 1239 capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2); 1240 if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) 1241 adapter->flags |= NETXEN_FW_MSS_CAP; 1242 } 1243 1244 err = netxen_napi_add(adapter, netdev); 1245 if (err) 1246 return err; 1247 1248 err = 
netxen_alloc_sw_resources(adapter); 1249 if (err) { 1250 printk(KERN_ERR "%s: Error in setting sw resources\n", 1251 netdev->name); 1252 return err; 1253 } 1254 1255 err = netxen_alloc_hw_resources(adapter); 1256 if (err) { 1257 printk(KERN_ERR "%s: Error in setting hw resources\n", 1258 netdev->name); 1259 goto err_out_free_sw; 1260 } 1261 1262 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1263 tx_ring = adapter->tx_ring; 1264 tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, 1265 crb_cmd_producer[adapter->portnum]); 1266 tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter, 1267 crb_cmd_consumer[adapter->portnum]); 1268 1269 tx_ring->producer = 0; 1270 tx_ring->sw_consumer = 0; 1271 1272 netxen_nic_update_cmd_producer(adapter, tx_ring); 1273 netxen_nic_update_cmd_consumer(adapter, tx_ring); 1274 } 1275 1276 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 1277 rds_ring = &adapter->recv_ctx.rds_rings[ring]; 1278 netxen_post_rx_buffers(adapter, ring, rds_ring); 1279 } 1280 1281 err = netxen_nic_request_irq(adapter); 1282 if (err) { 1283 dev_err(&pdev->dev, "%s: failed to setup interrupt\n", 1284 netdev->name); 1285 goto err_out_free_rxbuf; 1286 } 1287 1288 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1289 netxen_nic_init_coalesce_defaults(adapter); 1290 1291 netxen_create_sysfs_entries(adapter); 1292 1293 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; 1294 return 0; 1295 1296 err_out_free_rxbuf: 1297 netxen_release_rx_buffers(adapter); 1298 netxen_free_hw_resources(adapter); 1299 err_out_free_sw: 1300 netxen_free_sw_resources(adapter); 1301 return err; 1302 } 1303 1304 static void 1305 netxen_nic_detach(struct netxen_adapter *adapter) 1306 { 1307 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 1308 return; 1309 1310 netxen_remove_sysfs_entries(adapter); 1311 1312 netxen_free_hw_resources(adapter); 1313 netxen_release_rx_buffers(adapter); 1314 netxen_nic_free_irq(adapter); 1315 netxen_napi_del(adapter); 1316 netxen_free_sw_resources(adapter); 1317 1318 adapter->is_up = 0; 1319 } 1320 1321 int 1322 netxen_nic_reset_context(struct netxen_adapter *adapter) 1323 { 1324 int err = 0; 1325 struct net_device *netdev = adapter->netdev; 1326 1327 if (test_and_set_bit(__NX_RESETTING, &adapter->state)) 1328 return -EBUSY; 1329 1330 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { 1331 1332 netif_device_detach(netdev); 1333 1334 if (netif_running(netdev)) 1335 __netxen_nic_down(adapter, netdev); 1336 1337 netxen_nic_detach(adapter); 1338 1339 if (netif_running(netdev)) { 1340 err = netxen_nic_attach(adapter); 1341 if (!err) 1342 err = __netxen_nic_up(adapter, netdev); 1343 1344 if (err) 1345 goto done; 1346 } 1347 1348 netif_device_attach(netdev); 1349 } 1350 1351 done: 1352 clear_bit(__NX_RESETTING, &adapter->state); 1353 return err; 1354 } 1355 1356 static int 1357 netxen_setup_netdev(struct netxen_adapter *adapter, 1358 struct net_device *netdev) 1359 { 1360 int err = 0; 1361 struct pci_dev *pdev = adapter->pdev; 1362 1363 adapter->mc_enabled = 0; 1364 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1365 adapter->max_mc_count = 38; 1366 else 1367 adapter->max_mc_count = 16; 1368 1369 netdev->netdev_ops = &netxen_netdev_ops; 1370 netdev->watchdog_timeo = 5*HZ; 1371 1372 netxen_nic_change_mtu(netdev, netdev->mtu); 1373 1374 netdev->ethtool_ops = &netxen_nic_ethtool_ops; 1375 1376 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1377 NETIF_F_RXCSUM; 1378 1379 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1380 netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 
1381 1382 netdev->vlan_features |= netdev->hw_features; 1383 1384 if (adapter->pci_using_dac) { 1385 netdev->features |= NETIF_F_HIGHDMA; 1386 netdev->vlan_features |= NETIF_F_HIGHDMA; 1387 } 1388 1389 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) 1390 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 1391 1392 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) 1393 netdev->hw_features |= NETIF_F_LRO; 1394 1395 netdev->features |= netdev->hw_features; 1396 1397 netdev->irq = adapter->msix_entries[0].vector; 1398 1399 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); 1400 1401 if (netxen_read_mac_addr(adapter)) 1402 dev_warn(&pdev->dev, "failed to read mac addr\n"); 1403 1404 netif_carrier_off(netdev); 1405 1406 err = register_netdev(netdev); 1407 if (err) { 1408 dev_err(&pdev->dev, "failed to register net device\n"); 1409 return err; 1410 } 1411 1412 return 0; 1413 } 1414 1415 #define NETXEN_ULA_ADAPTER_KEY (0xdaddad01) 1416 #define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00) 1417 1418 static void netxen_read_ula_info(struct netxen_adapter *adapter) 1419 { 1420 u32 temp; 1421 1422 /* Print ULA info only once for an adapter */ 1423 if (adapter->portnum != 0) 1424 return; 1425 1426 temp = NXRD32(adapter, NETXEN_ULA_KEY); 1427 switch (temp) { 1428 case NETXEN_ULA_ADAPTER_KEY: 1429 dev_info(&adapter->pdev->dev, "ULA adapter"); 1430 break; 1431 case NETXEN_NON_ULA_ADAPTER_KEY: 1432 dev_info(&adapter->pdev->dev, "non ULA adapter"); 1433 break; 1434 default: 1435 break; 1436 } 1437 1438 return; 1439 } 1440 1441 #ifdef CONFIG_PCIEAER 1442 static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) 1443 { 1444 struct pci_dev *pdev = adapter->pdev; 1445 struct pci_dev *root = pdev->bus->self; 1446 u32 aer_pos; 1447 1448 /* root bus? 
*/ 1449 if (!root) 1450 return; 1451 1452 if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && 1453 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) 1454 return; 1455 1456 if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT) 1457 return; 1458 1459 aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); 1460 if (!aer_pos) 1461 return; 1462 1463 pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); 1464 } 1465 #endif 1466 1467 static int 1468 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1469 { 1470 struct net_device *netdev = NULL; 1471 struct netxen_adapter *adapter = NULL; 1472 int i = 0, err; 1473 int pci_func_id = PCI_FUNC(pdev->devfn); 1474 uint8_t revision_id; 1475 u32 val; 1476 1477 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { 1478 pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n", 1479 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); 1480 return -ENODEV; 1481 } 1482 1483 if ((err = pci_enable_device(pdev))) 1484 return err; 1485 1486 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1487 err = -ENODEV; 1488 goto err_out_disable_pdev; 1489 } 1490 1491 if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) 1492 goto err_out_disable_pdev; 1493 1494 if (NX_IS_REVISION_P3(pdev->revision)) 1495 pci_enable_pcie_error_reporting(pdev); 1496 1497 pci_set_master(pdev); 1498 1499 netdev = alloc_etherdev(sizeof(struct netxen_adapter)); 1500 if(!netdev) { 1501 err = -ENOMEM; 1502 goto err_out_free_res; 1503 } 1504 1505 SET_NETDEV_DEV(netdev, &pdev->dev); 1506 1507 adapter = netdev_priv(netdev); 1508 adapter->netdev = netdev; 1509 adapter->pdev = pdev; 1510 adapter->ahw.pci_func = pci_func_id; 1511 1512 revision_id = pdev->revision; 1513 adapter->ahw.revision_id = revision_id; 1514 1515 rwlock_init(&adapter->ahw.crb_lock); 1516 spin_lock_init(&adapter->ahw.mem_lock); 1517 1518 spin_lock_init(&adapter->tx_clean_lock); 1519 INIT_LIST_HEAD(&adapter->mac_list); 1520 INIT_LIST_HEAD(&adapter->ip_list); 1521 1522 err = netxen_setup_pci_map(adapter); 1523 if (err) 1524 goto err_out_free_netdev; 1525 1526 /* This will be reset for mezz cards */ 1527 adapter->portnum = pci_func_id; 1528 1529 err = netxen_nic_get_board_info(adapter); 1530 if (err) { 1531 dev_err(&pdev->dev, "Error getting board config info.\n"); 1532 goto err_out_iounmap; 1533 } 1534 1535 #ifdef CONFIG_PCIEAER 1536 netxen_mask_aer_correctable(adapter); 1537 #endif 1538 1539 /* Mezz cards have PCI function 0,2,3 enabled */ 1540 switch (adapter->ahw.board_type) { 1541 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: 1542 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: 1543 if (pci_func_id >= 2) 1544 adapter->portnum = pci_func_id - 2; 1545 break; 1546 default: 1547 break; 1548 } 1549 1550 err = netxen_check_flash_fw_compatibility(adapter); 1551 if (err) 1552 goto err_out_iounmap; 1553 1554 if (adapter->portnum == 0) { 1555 val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 1556 if (val != 0xffffffff && val != 0) { 1557 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); 1558 adapter->need_fw_reset = 1; 1559 } 1560 } 1561 1562 err = netxen_start_firmware(adapter); 1563 if (err) 1564 goto err_out_decr_ref; 1565 1566 /* 1567 * See if the firmware gave us a virtual-physical port mapping. 
1568 */ 1569 adapter->physical_port = adapter->portnum; 1570 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1571 i = NXRD32(adapter, CRB_V2P(adapter->portnum)); 1572 if (i != 0x55555555) 1573 adapter->physical_port = i; 1574 } 1575 1576 netxen_nic_clear_stats(adapter); 1577 1578 err = netxen_setup_intr(adapter); 1579 1580 if (err) { 1581 dev_err(&adapter->pdev->dev, 1582 "Failed to setup interrupts, error = %d\n", err); 1583 goto err_out_disable_msi; 1584 } 1585 1586 netxen_read_ula_info(adapter); 1587 1588 err = netxen_setup_netdev(adapter, netdev); 1589 if (err) 1590 goto err_out_disable_msi; 1591 1592 pci_set_drvdata(pdev, adapter); 1593 1594 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); 1595 1596 switch (adapter->ahw.port_type) { 1597 case NETXEN_NIC_GBE: 1598 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", 1599 adapter->netdev->name); 1600 break; 1601 case NETXEN_NIC_XGBE: 1602 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", 1603 adapter->netdev->name); 1604 break; 1605 } 1606 1607 netxen_create_diag_entries(adapter); 1608 1609 return 0; 1610 1611 err_out_disable_msi: 1612 netxen_teardown_intr(adapter); 1613 1614 netxen_free_dummy_dma(adapter); 1615 1616 err_out_decr_ref: 1617 nx_decr_dev_ref_cnt(adapter); 1618 1619 err_out_iounmap: 1620 netxen_cleanup_pci_map(adapter); 1621 1622 err_out_free_netdev: 1623 free_netdev(netdev); 1624 1625 err_out_free_res: 1626 pci_release_regions(pdev); 1627 1628 err_out_disable_pdev: 1629 pci_disable_device(pdev); 1630 return err; 1631 } 1632 1633 static 1634 void netxen_cleanup_minidump(struct netxen_adapter *adapter) 1635 { 1636 kfree(adapter->mdump.md_template); 1637 adapter->mdump.md_template = NULL; 1638 1639 if (adapter->mdump.md_capture_buff) { 1640 vfree(adapter->mdump.md_capture_buff); 1641 adapter->mdump.md_capture_buff = NULL; 1642 } 1643 } 1644 1645 static void netxen_nic_remove(struct pci_dev *pdev) 1646 { 1647 struct netxen_adapter *adapter; 1648 struct net_device *netdev; 1649 1650 adapter = pci_get_drvdata(pdev); 1651 if (adapter == NULL) 1652 return; 1653 1654 netdev = adapter->netdev; 1655 1656 netxen_cancel_fw_work(adapter); 1657 1658 unregister_netdev(netdev); 1659 1660 cancel_work_sync(&adapter->tx_timeout_task); 1661 1662 netxen_free_ip_list(adapter, false); 1663 netxen_nic_detach(adapter); 1664 1665 nx_decr_dev_ref_cnt(adapter); 1666 1667 if (adapter->portnum == 0) 1668 netxen_free_dummy_dma(adapter); 1669 1670 clear_bit(__NX_RESETTING, &adapter->state); 1671 1672 netxen_teardown_intr(adapter); 1673 netxen_set_interrupt_mode(adapter, 0); 1674 netxen_remove_diag_entries(adapter); 1675 1676 netxen_cleanup_pci_map(adapter); 1677 1678 netxen_release_firmware(adapter); 1679 1680 if (NX_IS_REVISION_P3(pdev->revision)) { 1681 netxen_cleanup_minidump(adapter); 1682 pci_disable_pcie_error_reporting(pdev); 1683 } 1684 1685 pci_release_regions(pdev); 1686 pci_disable_device(pdev); 1687 1688 free_netdev(netdev); 1689 } 1690 1691 static void netxen_nic_detach_func(struct netxen_adapter *adapter) 1692 { 1693 struct net_device *netdev = adapter->netdev; 1694 1695 netif_device_detach(netdev); 1696 1697 netxen_cancel_fw_work(adapter); 1698 1699 if (netif_running(netdev)) 1700 netxen_nic_down(adapter, netdev); 1701 1702 cancel_work_sync(&adapter->tx_timeout_task); 1703 1704 netxen_nic_detach(adapter); 1705 1706 if (adapter->portnum == 0) 1707 netxen_free_dummy_dma(adapter); 1708 1709 nx_decr_dev_ref_cnt(adapter); 1710 1711 clear_bit(__NX_RESETTING, &adapter->state); 1712 } 1713 1714 static int 
netxen_nic_attach_func(struct pci_dev *pdev) 1715 { 1716 struct netxen_adapter *adapter = pci_get_drvdata(pdev); 1717 struct net_device *netdev = adapter->netdev; 1718 int err; 1719 1720 err = pci_enable_device(pdev); 1721 if (err) 1722 return err; 1723 1724 pci_set_power_state(pdev, PCI_D0); 1725 pci_set_master(pdev); 1726 pci_restore_state(pdev); 1727 1728 adapter->ahw.crb_win = -1; 1729 adapter->ahw.ocm_win = -1; 1730 1731 err = netxen_start_firmware(adapter); 1732 if (err) { 1733 dev_err(&pdev->dev, "failed to start firmware\n"); 1734 return err; 1735 } 1736 1737 if (netif_running(netdev)) { 1738 err = netxen_nic_attach(adapter); 1739 if (err) 1740 goto err_out; 1741 1742 err = netxen_nic_up(adapter, netdev); 1743 if (err) 1744 goto err_out_detach; 1745 1746 netxen_restore_indev_addr(netdev, NETDEV_UP); 1747 } 1748 1749 netif_device_attach(netdev); 1750 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); 1751 return 0; 1752 1753 err_out_detach: 1754 netxen_nic_detach(adapter); 1755 err_out: 1756 nx_decr_dev_ref_cnt(adapter); 1757 return err; 1758 } 1759 1760 static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, 1761 pci_channel_state_t state) 1762 { 1763 struct netxen_adapter *adapter = pci_get_drvdata(pdev); 1764 1765 if (state == pci_channel_io_perm_failure) 1766 return PCI_ERS_RESULT_DISCONNECT; 1767 1768 if (nx_dev_request_aer(adapter)) 1769 return PCI_ERS_RESULT_RECOVERED; 1770 1771 netxen_nic_detach_func(adapter); 1772 1773 pci_disable_device(pdev); 1774 1775 return PCI_ERS_RESULT_NEED_RESET; 1776 } 1777 1778 static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) 1779 { 1780 int err = 0; 1781 1782 err = netxen_nic_attach_func(pdev); 1783 1784 return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 1785 } 1786 1787 static void netxen_io_resume(struct pci_dev *pdev) 1788 { 1789 pci_cleanup_aer_uncorrect_error_status(pdev); 1790 } 1791 1792 static void netxen_nic_shutdown(struct pci_dev *pdev) 1793 { 1794 struct netxen_adapter *adapter = pci_get_drvdata(pdev); 1795 1796 netxen_nic_detach_func(adapter); 1797 1798 if (pci_save_state(pdev)) 1799 return; 1800 1801 if (netxen_nic_wol_supported(adapter)) { 1802 pci_enable_wake(pdev, PCI_D3cold, 1); 1803 pci_enable_wake(pdev, PCI_D3hot, 1); 1804 } 1805 1806 pci_disable_device(pdev); 1807 } 1808 1809 #ifdef CONFIG_PM 1810 static int 1811 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) 1812 { 1813 struct netxen_adapter *adapter = pci_get_drvdata(pdev); 1814 int retval; 1815 1816 netxen_nic_detach_func(adapter); 1817 1818 retval = pci_save_state(pdev); 1819 if (retval) 1820 return retval; 1821 1822 if (netxen_nic_wol_supported(adapter)) { 1823 pci_enable_wake(pdev, PCI_D3cold, 1); 1824 pci_enable_wake(pdev, PCI_D3hot, 1); 1825 } 1826 1827 pci_disable_device(pdev); 1828 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1829 1830 return 0; 1831 } 1832 1833 static int 1834 netxen_nic_resume(struct pci_dev *pdev) 1835 { 1836 return netxen_nic_attach_func(pdev); 1837 } 1838 #endif 1839 1840 static int netxen_nic_open(struct net_device *netdev) 1841 { 1842 struct netxen_adapter *adapter = netdev_priv(netdev); 1843 int err = 0; 1844 1845 if (adapter->driver_mismatch) 1846 return -EIO; 1847 1848 err = netxen_nic_attach(adapter); 1849 if (err) 1850 return err; 1851 1852 err = __netxen_nic_up(adapter, netdev); 1853 if (err) 1854 goto err_out; 1855 1856 netif_start_queue(netdev); 1857 1858 return 0; 1859 1860 err_out: 1861 netxen_nic_detach(adapter); 1862 return err; 1863 } 1864 
1865 /* 1866 * netxen_nic_close - Disables a network interface entry point 1867 */ 1868 static int netxen_nic_close(struct net_device *netdev) 1869 { 1870 struct netxen_adapter *adapter = netdev_priv(netdev); 1871 1872 __netxen_nic_down(adapter, netdev); 1873 return 0; 1874 } 1875 1876 static void 1877 netxen_tso_check(struct net_device *netdev, 1878 struct nx_host_tx_ring *tx_ring, 1879 struct cmd_desc_type0 *first_desc, 1880 struct sk_buff *skb) 1881 { 1882 u8 opcode = TX_ETHER_PKT; 1883 __be16 protocol = skb->protocol; 1884 u16 flags = 0, vid = 0; 1885 u32 producer; 1886 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; 1887 struct cmd_desc_type0 *hwdesc; 1888 struct vlan_ethhdr *vh; 1889 1890 if (protocol == cpu_to_be16(ETH_P_8021Q)) { 1891 1892 vh = (struct vlan_ethhdr *)skb->data; 1893 protocol = vh->h_vlan_encapsulated_proto; 1894 flags = FLAGS_VLAN_TAGGED; 1895 1896 } else if (vlan_tx_tag_present(skb)) { 1897 flags = FLAGS_VLAN_OOB; 1898 vid = vlan_tx_tag_get(skb); 1899 netxen_set_tx_vlan_tci(first_desc, vid); 1900 vlan_oob = 1; 1901 } 1902 1903 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1904 skb_shinfo(skb)->gso_size > 0) { 1905 1906 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1907 1908 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 1909 first_desc->total_hdr_length = hdr_len; 1910 if (vlan_oob) { 1911 first_desc->total_hdr_length += VLAN_HLEN; 1912 first_desc->tcp_hdr_offset = VLAN_HLEN; 1913 first_desc->ip_hdr_offset = VLAN_HLEN; 1914 /* Only in case of TSO on vlan device */ 1915 flags |= FLAGS_VLAN_TAGGED; 1916 } 1917 1918 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 1919 TX_TCP_LSO6 : TX_TCP_LSO; 1920 tso = 1; 1921 1922 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1923 u8 l4proto; 1924 1925 if (protocol == cpu_to_be16(ETH_P_IP)) { 1926 l4proto = ip_hdr(skb)->protocol; 1927 1928 if (l4proto == IPPROTO_TCP) 1929 opcode = TX_TCP_PKT; 1930 else if(l4proto == IPPROTO_UDP) 1931 opcode = TX_UDP_PKT; 1932 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { 1933 l4proto = ipv6_hdr(skb)->nexthdr; 1934 1935 if (l4proto == IPPROTO_TCP) 1936 opcode = TX_TCPV6_PKT; 1937 else if(l4proto == IPPROTO_UDP) 1938 opcode = TX_UDPV6_PKT; 1939 } 1940 } 1941 1942 first_desc->tcp_hdr_offset += skb_transport_offset(skb); 1943 first_desc->ip_hdr_offset += skb_network_offset(skb); 1944 netxen_set_tx_flags_opcode(first_desc, flags, opcode); 1945 1946 if (!tso) 1947 return; 1948 1949 /* For LSO, we need to copy the MAC/IP/TCP headers into 1950 * the descriptor ring 1951 */ 1952 producer = tx_ring->producer; 1953 copied = 0; 1954 offset = 2; 1955 1956 if (vlan_oob) { 1957 /* Create a TSO vlan header template for firmware */ 1958 1959 hwdesc = &tx_ring->desc_head[producer]; 1960 tx_ring->cmd_buf_arr[producer].skb = NULL; 1961 1962 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, 1963 hdr_len + VLAN_HLEN); 1964 1965 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); 1966 skb_copy_from_linear_data(skb, vh, 12); 1967 vh->h_vlan_proto = htons(ETH_P_8021Q); 1968 vh->h_vlan_TCI = htons(vid); 1969 skb_copy_from_linear_data_offset(skb, 12, 1970 (char *)vh + 16, copy_len - 16); 1971 1972 copied = copy_len - VLAN_HLEN; 1973 offset = 0; 1974 1975 producer = get_next_index(producer, tx_ring->num_desc); 1976 } 1977 1978 while (copied < hdr_len) { 1979 1980 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, 1981 (hdr_len - copied)); 1982 1983 hwdesc = &tx_ring->desc_head[producer]; 1984 tx_ring->cmd_buf_arr[producer].skb = NULL; 1985 1986 
skb_copy_from_linear_data_offset(skb, copied, 1987 (char *)hwdesc + offset, copy_len); 1988 1989 copied += copy_len; 1990 offset = 0; 1991 1992 producer = get_next_index(producer, tx_ring->num_desc); 1993 } 1994 1995 tx_ring->producer = producer; 1996 barrier(); 1997 } 1998 1999 static int 2000 netxen_map_tx_skb(struct pci_dev *pdev, 2001 struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) 2002 { 2003 struct netxen_skb_frag *nf; 2004 struct skb_frag_struct *frag; 2005 int i, nr_frags; 2006 dma_addr_t map; 2007 2008 nr_frags = skb_shinfo(skb)->nr_frags; 2009 nf = &pbuf->frag_array[0]; 2010 2011 map = pci_map_single(pdev, skb->data, 2012 skb_headlen(skb), PCI_DMA_TODEVICE); 2013 if (pci_dma_mapping_error(pdev, map)) 2014 goto out_err; 2015 2016 nf->dma = map; 2017 nf->length = skb_headlen(skb); 2018 2019 for (i = 0; i < nr_frags; i++) { 2020 frag = &skb_shinfo(skb)->frags[i]; 2021 nf = &pbuf->frag_array[i+1]; 2022 2023 map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), 2024 DMA_TO_DEVICE); 2025 if (dma_mapping_error(&pdev->dev, map)) 2026 goto unwind; 2027 2028 nf->dma = map; 2029 nf->length = skb_frag_size(frag); 2030 } 2031 2032 return 0; 2033 2034 unwind: 2035 while (--i >= 0) { 2036 nf = &pbuf->frag_array[i+1]; 2037 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); 2038 nf->dma = 0ULL; 2039 } 2040 2041 nf = &pbuf->frag_array[0]; 2042 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); 2043 nf->dma = 0ULL; 2044 2045 out_err: 2046 return -ENOMEM; 2047 } 2048 2049 static inline void 2050 netxen_clear_cmddesc(u64 *desc) 2051 { 2052 desc[0] = 0ULL; 2053 desc[2] = 0ULL; 2054 } 2055 2056 static netdev_tx_t 2057 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2058 { 2059 struct netxen_adapter *adapter = netdev_priv(netdev); 2060 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; 2061 struct netxen_cmd_buffer *pbuf; 2062 struct netxen_skb_frag *buffrag; 2063 struct cmd_desc_type0 *hwdesc, *first_desc; 2064 struct pci_dev *pdev; 2065 int i, k; 2066 int delta = 0; 2067 struct skb_frag_struct *frag; 2068 2069 u32 producer; 2070 int frag_count, no_of_desc; 2071 u32 num_txd = tx_ring->num_desc; 2072 2073 frag_count = skb_shinfo(skb)->nr_frags + 1; 2074 2075 /* 14 frags supported for normal packet and 2076 * 32 frags supported for TSO packet 2077 */ 2078 if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { 2079 2080 for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { 2081 frag = &skb_shinfo(skb)->frags[i]; 2082 delta += skb_frag_size(frag); 2083 } 2084 2085 if (!__pskb_pull_tail(skb, delta)) 2086 goto drop_packet; 2087 2088 frag_count = 1 + skb_shinfo(skb)->nr_frags; 2089 } 2090 /* 4 fragments per cmd des */ 2091 no_of_desc = (frag_count + 3) >> 2; 2092 2093 if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { 2094 netif_stop_queue(netdev); 2095 smp_mb(); 2096 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) 2097 netif_start_queue(netdev); 2098 else 2099 return NETDEV_TX_BUSY; 2100 } 2101 2102 producer = tx_ring->producer; 2103 pbuf = &tx_ring->cmd_buf_arr[producer]; 2104 2105 pdev = adapter->pdev; 2106 2107 if (netxen_map_tx_skb(pdev, skb, pbuf)) 2108 goto drop_packet; 2109 2110 pbuf->skb = skb; 2111 pbuf->frag_count = frag_count; 2112 2113 first_desc = hwdesc = &tx_ring->desc_head[producer]; 2114 netxen_clear_cmddesc((u64 *)hwdesc); 2115 2116 netxen_set_tx_frags_len(first_desc, frag_count, skb->len); 2117 netxen_set_tx_port(first_desc, adapter->portnum); 2118 2119 for (i = 0; i < frag_count; i++) { 2120 2121 
k = i % 4; 2122 2123 if ((k == 0) && (i > 0)) { 2124 /* move to next desc.*/ 2125 producer = get_next_index(producer, num_txd); 2126 hwdesc = &tx_ring->desc_head[producer]; 2127 netxen_clear_cmddesc((u64 *)hwdesc); 2128 tx_ring->cmd_buf_arr[producer].skb = NULL; 2129 } 2130 2131 buffrag = &pbuf->frag_array[i]; 2132 2133 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); 2134 switch (k) { 2135 case 0: 2136 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); 2137 break; 2138 case 1: 2139 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); 2140 break; 2141 case 2: 2142 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); 2143 break; 2144 case 3: 2145 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); 2146 break; 2147 } 2148 } 2149 2150 tx_ring->producer = get_next_index(producer, num_txd); 2151 2152 netxen_tso_check(netdev, tx_ring, first_desc, skb); 2153 2154 adapter->stats.txbytes += skb->len; 2155 adapter->stats.xmitcalled++; 2156 2157 netxen_nic_update_cmd_producer(adapter, tx_ring); 2158 2159 return NETDEV_TX_OK; 2160 2161 drop_packet: 2162 adapter->stats.txdropped++; 2163 dev_kfree_skb_any(skb); 2164 return NETDEV_TX_OK; 2165 } 2166 2167 static int netxen_nic_check_temp(struct netxen_adapter *adapter) 2168 { 2169 struct net_device *netdev = adapter->netdev; 2170 uint32_t temp, temp_state, temp_val; 2171 int rv = 0; 2172 2173 temp = NXRD32(adapter, CRB_TEMP_STATE); 2174 2175 temp_state = nx_get_temp_state(temp); 2176 temp_val = nx_get_temp_val(temp); 2177 2178 if (temp_state == NX_TEMP_PANIC) { 2179 printk(KERN_ALERT 2180 "%s: Device temperature %d degrees C exceeds" 2181 " maximum allowed. Hardware has been shut down.\n", 2182 netdev->name, temp_val); 2183 rv = 1; 2184 } else if (temp_state == NX_TEMP_WARN) { 2185 if (adapter->temp == NX_TEMP_NORMAL) { 2186 printk(KERN_ALERT 2187 "%s: Device temperature %d degrees C " 2188 "exceeds operating range." 
2189 " Immediate action needed.\n", 2190 netdev->name, temp_val); 2191 } 2192 } else { 2193 if (adapter->temp == NX_TEMP_WARN) { 2194 printk(KERN_INFO 2195 "%s: Device temperature is now %d degrees C" 2196 " in normal range.\n", netdev->name, 2197 temp_val); 2198 } 2199 } 2200 adapter->temp = temp_state; 2201 return rv; 2202 } 2203 2204 void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) 2205 { 2206 struct net_device *netdev = adapter->netdev; 2207 2208 if (adapter->ahw.linkup && !linkup) { 2209 printk(KERN_INFO "%s: %s NIC Link is down\n", 2210 netxen_nic_driver_name, netdev->name); 2211 adapter->ahw.linkup = 0; 2212 if (netif_running(netdev)) { 2213 netif_carrier_off(netdev); 2214 netif_stop_queue(netdev); 2215 } 2216 adapter->link_changed = !adapter->has_link_events; 2217 } else if (!adapter->ahw.linkup && linkup) { 2218 printk(KERN_INFO "%s: %s NIC Link is up\n", 2219 netxen_nic_driver_name, netdev->name); 2220 adapter->ahw.linkup = 1; 2221 if (netif_running(netdev)) { 2222 netif_carrier_on(netdev); 2223 netif_wake_queue(netdev); 2224 } 2225 adapter->link_changed = !adapter->has_link_events; 2226 } 2227 } 2228 2229 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) 2230 { 2231 u32 val, port, linkup; 2232 2233 port = adapter->physical_port; 2234 2235 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 2236 val = NXRD32(adapter, CRB_XG_STATE_P3); 2237 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); 2238 linkup = (val == XG_LINK_UP_P3); 2239 } else { 2240 val = NXRD32(adapter, CRB_XG_STATE); 2241 val = (val >> port*8) & 0xff; 2242 linkup = (val == XG_LINK_UP); 2243 } 2244 2245 netxen_advert_link_change(adapter, linkup); 2246 } 2247 2248 static void netxen_tx_timeout(struct net_device *netdev) 2249 { 2250 struct netxen_adapter *adapter = netdev_priv(netdev); 2251 2252 if (test_bit(__NX_RESETTING, &adapter->state)) 2253 return; 2254 2255 dev_err(&netdev->dev, "transmit timeout, resetting.\n"); 2256 schedule_work(&adapter->tx_timeout_task); 2257 } 2258 2259 static void netxen_tx_timeout_task(struct work_struct *work) 2260 { 2261 struct netxen_adapter *adapter = 2262 container_of(work, struct netxen_adapter, tx_timeout_task); 2263 2264 if (!netif_running(adapter->netdev)) 2265 return; 2266 2267 if (test_and_set_bit(__NX_RESETTING, &adapter->state)) 2268 return; 2269 2270 if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) 2271 goto request_reset; 2272 2273 rtnl_lock(); 2274 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 2275 /* try to scrub interrupt */ 2276 netxen_napi_disable(adapter); 2277 2278 netxen_napi_enable(adapter); 2279 2280 netif_wake_queue(adapter->netdev); 2281 2282 clear_bit(__NX_RESETTING, &adapter->state); 2283 } else { 2284 clear_bit(__NX_RESETTING, &adapter->state); 2285 if (netxen_nic_reset_context(adapter)) { 2286 rtnl_unlock(); 2287 goto request_reset; 2288 } 2289 } 2290 adapter->netdev->trans_start = jiffies; 2291 rtnl_unlock(); 2292 return; 2293 2294 request_reset: 2295 adapter->need_fw_reset = 1; 2296 clear_bit(__NX_RESETTING, &adapter->state); 2297 } 2298 2299 static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, 2300 struct rtnl_link_stats64 *stats) 2301 { 2302 struct netxen_adapter *adapter = netdev_priv(netdev); 2303 2304 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; 2305 stats->tx_packets = adapter->stats.xmitfinished; 2306 stats->rx_bytes = adapter->stats.rxbytes; 2307 stats->tx_bytes = adapter->stats.txbytes; 2308 stats->rx_dropped = adapter->stats.rxdropped; 
2309 stats->tx_dropped = adapter->stats.txdropped; 2310 2311 return stats; 2312 } 2313 2314 static irqreturn_t netxen_intr(int irq, void *data) 2315 { 2316 struct nx_host_sds_ring *sds_ring = data; 2317 struct netxen_adapter *adapter = sds_ring->adapter; 2318 u32 status = 0; 2319 2320 status = readl(adapter->isr_int_vec); 2321 2322 if (!(status & adapter->int_vec_bit)) 2323 return IRQ_NONE; 2324 2325 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 2326 /* check interrupt state machine, to be sure */ 2327 status = readl(adapter->crb_int_state_reg); 2328 if (!ISR_LEGACY_INT_TRIGGERED(status)) 2329 return IRQ_NONE; 2330 2331 } else { 2332 unsigned long our_int = 0; 2333 2334 our_int = readl(adapter->crb_int_state_reg); 2335 2336 /* not our interrupt */ 2337 if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) 2338 return IRQ_NONE; 2339 2340 /* claim interrupt */ 2341 writel((our_int & 0xffffffff), adapter->crb_int_state_reg); 2342 2343 /* clear interrupt */ 2344 netxen_nic_disable_int(sds_ring); 2345 } 2346 2347 writel(0xffffffff, adapter->tgt_status_reg); 2348 /* read twice to ensure write is flushed */ 2349 readl(adapter->isr_int_vec); 2350 readl(adapter->isr_int_vec); 2351 2352 napi_schedule(&sds_ring->napi); 2353 2354 return IRQ_HANDLED; 2355 } 2356 2357 static irqreturn_t netxen_msi_intr(int irq, void *data) 2358 { 2359 struct nx_host_sds_ring *sds_ring = data; 2360 struct netxen_adapter *adapter = sds_ring->adapter; 2361 2362 /* clear interrupt */ 2363 writel(0xffffffff, adapter->tgt_status_reg); 2364 2365 napi_schedule(&sds_ring->napi); 2366 return IRQ_HANDLED; 2367 } 2368 2369 static irqreturn_t netxen_msix_intr(int irq, void *data) 2370 { 2371 struct nx_host_sds_ring *sds_ring = data; 2372 2373 napi_schedule(&sds_ring->napi); 2374 return IRQ_HANDLED; 2375 } 2376 2377 static int netxen_nic_poll(struct napi_struct *napi, int budget) 2378 { 2379 struct nx_host_sds_ring *sds_ring = 2380 container_of(napi, struct nx_host_sds_ring, napi); 2381 2382 struct netxen_adapter *adapter = sds_ring->adapter; 2383 2384 int tx_complete; 2385 int work_done; 2386 2387 tx_complete = netxen_process_cmd_ring(adapter); 2388 2389 work_done = netxen_process_rcv_ring(sds_ring, budget); 2390 2391 if ((work_done < budget) && tx_complete) { 2392 napi_complete(&sds_ring->napi); 2393 if (test_bit(__NX_DEV_UP, &adapter->state)) 2394 netxen_nic_enable_int(sds_ring); 2395 } 2396 2397 return work_done; 2398 } 2399 2400 #ifdef CONFIG_NET_POLL_CONTROLLER 2401 static void netxen_nic_poll_controller(struct net_device *netdev) 2402 { 2403 int ring; 2404 struct nx_host_sds_ring *sds_ring; 2405 struct netxen_adapter *adapter = netdev_priv(netdev); 2406 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 2407 2408 disable_irq(adapter->irq); 2409 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 2410 sds_ring = &recv_ctx->sds_rings[ring]; 2411 netxen_intr(adapter->irq, sds_ring); 2412 } 2413 enable_irq(adapter->irq); 2414 } 2415 #endif 2416 2417 static int 2418 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) 2419 { 2420 int count; 2421 if (netxen_api_lock(adapter)) 2422 return -EIO; 2423 2424 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 2425 2426 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); 2427 2428 netxen_api_unlock(adapter); 2429 return count; 2430 } 2431 2432 static int 2433 nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) 2434 { 2435 int count, state; 2436 if (netxen_api_lock(adapter)) 2437 return -EIO; 2438 2439 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 2440 WARN_ON(count == 0); 
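	/*
	 * NX_CRB_DEV_REF_COUNT is a CRB-resident count of the PCI functions
	 * currently holding the device (see nx_incr_dev_ref_cnt() and
	 * netxen_can_start_firmware()); when the last reference is dropped
	 * the device state is returned to NX_DEV_COLD below, unless it has
	 * already been marked NX_DEV_FAILED.
	 */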
2441 2442 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); 2443 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2444 2445 if (count == 0 && state != NX_DEV_FAILED) 2446 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); 2447 2448 netxen_api_unlock(adapter); 2449 return count; 2450 } 2451 2452 static int 2453 nx_dev_request_aer(struct netxen_adapter *adapter) 2454 { 2455 u32 state; 2456 int ret = -EINVAL; 2457 2458 if (netxen_api_lock(adapter)) 2459 return ret; 2460 2461 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2462 2463 if (state == NX_DEV_NEED_AER) 2464 ret = 0; 2465 else if (state == NX_DEV_READY) { 2466 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER); 2467 ret = 0; 2468 } 2469 2470 netxen_api_unlock(adapter); 2471 return ret; 2472 } 2473 2474 int 2475 nx_dev_request_reset(struct netxen_adapter *adapter) 2476 { 2477 u32 state; 2478 int ret = -EINVAL; 2479 2480 if (netxen_api_lock(adapter)) 2481 return ret; 2482 2483 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2484 2485 if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) 2486 ret = 0; 2487 else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { 2488 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); 2489 adapter->flags |= NETXEN_FW_RESET_OWNER; 2490 ret = 0; 2491 } 2492 2493 netxen_api_unlock(adapter); 2494 2495 return ret; 2496 } 2497 2498 static int 2499 netxen_can_start_firmware(struct netxen_adapter *adapter) 2500 { 2501 int count; 2502 int can_start = 0; 2503 2504 if (netxen_api_lock(adapter)) { 2505 nx_incr_dev_ref_cnt(adapter); 2506 return -1; 2507 } 2508 2509 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 2510 2511 if ((count < 0) || (count >= NX_MAX_PCI_FUNC)) 2512 count = 0; 2513 2514 if (count == 0) { 2515 can_start = 1; 2516 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING); 2517 } 2518 2519 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); 2520 2521 netxen_api_unlock(adapter); 2522 2523 return can_start; 2524 } 2525 2526 static void 2527 netxen_schedule_work(struct netxen_adapter *adapter, 2528 work_func_t func, int delay) 2529 { 2530 INIT_DELAYED_WORK(&adapter->fw_work, func); 2531 schedule_delayed_work(&adapter->fw_work, delay); 2532 } 2533 2534 static void 2535 netxen_cancel_fw_work(struct netxen_adapter *adapter) 2536 { 2537 while (test_and_set_bit(__NX_RESETTING, &adapter->state)) 2538 msleep(10); 2539 2540 cancel_delayed_work_sync(&adapter->fw_work); 2541 } 2542 2543 static void 2544 netxen_attach_work(struct work_struct *work) 2545 { 2546 struct netxen_adapter *adapter = container_of(work, 2547 struct netxen_adapter, fw_work.work); 2548 struct net_device *netdev = adapter->netdev; 2549 int err = 0; 2550 2551 if (netif_running(netdev)) { 2552 err = netxen_nic_attach(adapter); 2553 if (err) 2554 goto done; 2555 2556 err = netxen_nic_up(adapter, netdev); 2557 if (err) { 2558 netxen_nic_detach(adapter); 2559 goto done; 2560 } 2561 2562 netxen_restore_indev_addr(netdev, NETDEV_UP); 2563 } 2564 2565 netif_device_attach(netdev); 2566 2567 done: 2568 adapter->fw_fail_cnt = 0; 2569 clear_bit(__NX_RESETTING, &adapter->state); 2570 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); 2571 } 2572 2573 static void 2574 netxen_fwinit_work(struct work_struct *work) 2575 { 2576 struct netxen_adapter *adapter = container_of(work, 2577 struct netxen_adapter, fw_work.work); 2578 int dev_state; 2579 int count; 2580 dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); 2581 if (adapter->flags & NETXEN_FW_RESET_OWNER) { 2582 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 2583 WARN_ON(count == 0); 2584 if (count == 
1) { 2585 if (adapter->mdump.md_enabled) { 2586 rtnl_lock(); 2587 netxen_dump_fw(adapter); 2588 rtnl_unlock(); 2589 } 2590 adapter->flags &= ~NETXEN_FW_RESET_OWNER; 2591 if (netxen_api_lock(adapter)) { 2592 clear_bit(__NX_RESETTING, &adapter->state); 2593 NXWR32(adapter, NX_CRB_DEV_STATE, 2594 NX_DEV_FAILED); 2595 return; 2596 } 2597 count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 2598 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); 2599 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); 2600 dev_state = NX_DEV_COLD; 2601 netxen_api_unlock(adapter); 2602 } 2603 } 2604 2605 switch (dev_state) { 2606 case NX_DEV_COLD: 2607 case NX_DEV_READY: 2608 if (!netxen_start_firmware(adapter)) { 2609 netxen_schedule_work(adapter, netxen_attach_work, 0); 2610 return; 2611 } 2612 break; 2613 2614 case NX_DEV_NEED_RESET: 2615 case NX_DEV_INITALIZING: 2616 netxen_schedule_work(adapter, 2617 netxen_fwinit_work, 2 * FW_POLL_DELAY); 2618 return; 2619 2620 case NX_DEV_FAILED: 2621 default: 2622 nx_incr_dev_ref_cnt(adapter); 2623 break; 2624 } 2625 2626 if (netxen_api_lock(adapter)) { 2627 clear_bit(__NX_RESETTING, &adapter->state); 2628 return; 2629 } 2630 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED); 2631 netxen_api_unlock(adapter); 2632 dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n", 2633 adapter->netdev->name); 2634 2635 clear_bit(__NX_RESETTING, &adapter->state); 2636 } 2637 2638 static void 2639 netxen_detach_work(struct work_struct *work) 2640 { 2641 struct netxen_adapter *adapter = container_of(work, 2642 struct netxen_adapter, fw_work.work); 2643 struct net_device *netdev = adapter->netdev; 2644 int ref_cnt = 0, delay; 2645 u32 status; 2646 2647 netif_device_detach(netdev); 2648 2649 netxen_nic_down(adapter, netdev); 2650 2651 rtnl_lock(); 2652 netxen_nic_detach(adapter); 2653 rtnl_unlock(); 2654 2655 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2656 2657 if (status & NX_RCODE_FATAL_ERROR) 2658 goto err_ret; 2659 2660 if (adapter->temp == NX_TEMP_PANIC) 2661 goto err_ret; 2662 2663 if (!(adapter->flags & NETXEN_FW_RESET_OWNER)) 2664 ref_cnt = nx_decr_dev_ref_cnt(adapter); 2665 2666 if (ref_cnt == -EIO) 2667 goto err_ret; 2668 2669 delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY); 2670 2671 adapter->fw_wait_cnt = 0; 2672 netxen_schedule_work(adapter, netxen_fwinit_work, delay); 2673 2674 return; 2675 2676 err_ret: 2677 clear_bit(__NX_RESETTING, &adapter->state); 2678 } 2679 2680 static int 2681 netxen_check_health(struct netxen_adapter *adapter) 2682 { 2683 u32 state, heartbit; 2684 u32 peg_status; 2685 struct net_device *netdev = adapter->netdev; 2686 2687 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2688 if (state == NX_DEV_NEED_AER) 2689 return 0; 2690 2691 if (netxen_nic_check_temp(adapter)) 2692 goto detach; 2693 2694 if (adapter->need_fw_reset) { 2695 if (nx_dev_request_reset(adapter)) 2696 return 0; 2697 goto detach; 2698 } 2699 2700 /* NX_DEV_NEED_RESET, this state can be marked in two cases 2701 * 1. Tx timeout 2. 
Fw hang
 * Send request to destroy context in case of tx timeout only;
 * it is not required in case of Fw hang.
 */
	if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
		adapter->need_fw_reset = 1;
		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			goto detach;
	}

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
		return 0;

	heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
	if (heartbit != adapter->heartbit) {
		adapter->heartbit = heartbit;
		adapter->fw_fail_cnt = 0;
		if (adapter->need_fw_reset)
			goto detach;
		return 0;
	}

	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
		return 0;

	if (nx_dev_request_reset(adapter))
		return 0;

	clear_bit(__NX_FW_ATTACHED, &adapter->state);

	dev_err(&netdev->dev, "firmware hang detected\n");
	peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
	dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
		"PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
		"PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
		"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
		"PEG_NET_4_PC: 0x%x\n",
		peg_status,
		NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
		NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
	if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
		dev_err(&adapter->pdev->dev,
			"Firmware aborted with error code 0x00006700. "
			"Device is being reset.\n");
detach:
	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
	    !test_and_set_bit(__NX_RESETTING, &adapter->state))
		netxen_schedule_work(adapter, netxen_detach_work, 0);
	return 1;
}

static void
netxen_fw_poll_work(struct work_struct *work)
{
	struct netxen_adapter *adapter = container_of(work,
				struct netxen_adapter, fw_work.work);

	if (test_bit(__NX_RESETTING, &adapter->state))
		goto reschedule;

	if (test_bit(__NX_DEV_UP, &adapter->state)) {
		if (!adapter->has_link_events) {

			netxen_nic_handle_phy_intr(adapter);

			if (adapter->link_changed)
				netxen_nic_set_link_parameters(adapter);
		}
	}

	if (netxen_check_health(adapter))
		return;

reschedule:
	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
}

static ssize_t
netxen_store_bridged_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct net_device *net = to_net_dev(dev);
	struct netxen_adapter *adapter = netdev_priv(net);
	unsigned long new;
	int ret = -EINVAL;

	if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
		goto err_out;

	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		goto err_out;

	if (kstrtoul(buf, 2, &new))
		goto err_out;

	if (!netxen_config_bridged_mode(adapter, !!new))
		ret = len;

err_out:
	return ret;
}

static ssize_t
netxen_show_bridged_mode(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	struct netxen_adapter *adapter;
	int bridged_mode = 0;

	adapter = netdev_priv(net);

	if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
		bridged_mode = !!(adapter->flags &
NETXEN_NIC_BRIDGE_ENABLED); 2819 2820 return sprintf(buf, "%d\n", bridged_mode); 2821 } 2822 2823 static struct device_attribute dev_attr_bridged_mode = { 2824 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, 2825 .show = netxen_show_bridged_mode, 2826 .store = netxen_store_bridged_mode, 2827 }; 2828 2829 static ssize_t 2830 netxen_store_diag_mode(struct device *dev, 2831 struct device_attribute *attr, const char *buf, size_t len) 2832 { 2833 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2834 unsigned long new; 2835 2836 if (kstrtoul(buf, 2, &new)) 2837 return -EINVAL; 2838 2839 if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) 2840 adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; 2841 2842 return len; 2843 } 2844 2845 static ssize_t 2846 netxen_show_diag_mode(struct device *dev, 2847 struct device_attribute *attr, char *buf) 2848 { 2849 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2850 2851 return sprintf(buf, "%d\n", 2852 !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); 2853 } 2854 2855 static struct device_attribute dev_attr_diag_mode = { 2856 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, 2857 .show = netxen_show_diag_mode, 2858 .store = netxen_store_diag_mode, 2859 }; 2860 2861 static int 2862 netxen_sysfs_validate_crb(struct netxen_adapter *adapter, 2863 loff_t offset, size_t size) 2864 { 2865 size_t crb_size = 4; 2866 2867 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) 2868 return -EIO; 2869 2870 if (offset < NETXEN_PCI_CRBSPACE) { 2871 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 2872 return -EINVAL; 2873 2874 if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, 2875 NETXEN_PCI_CAMQM_2M_END)) 2876 crb_size = 8; 2877 else 2878 return -EINVAL; 2879 } 2880 2881 if ((size != crb_size) || (offset & (crb_size-1))) 2882 return -EINVAL; 2883 2884 return 0; 2885 } 2886 2887 static ssize_t 2888 netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, 2889 struct bin_attribute *attr, 2890 char *buf, loff_t offset, size_t size) 2891 { 2892 struct device *dev = container_of(kobj, struct device, kobj); 2893 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2894 u32 data; 2895 u64 qmdata; 2896 int ret; 2897 2898 ret = netxen_sysfs_validate_crb(adapter, offset, size); 2899 if (ret != 0) 2900 return ret; 2901 2902 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && 2903 ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, 2904 NETXEN_PCI_CAMQM_2M_END)) { 2905 netxen_pci_camqm_read_2M(adapter, offset, &qmdata); 2906 memcpy(buf, &qmdata, size); 2907 } else { 2908 data = NXRD32(adapter, offset); 2909 memcpy(buf, &data, size); 2910 } 2911 2912 return size; 2913 } 2914 2915 static ssize_t 2916 netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, 2917 struct bin_attribute *attr, 2918 char *buf, loff_t offset, size_t size) 2919 { 2920 struct device *dev = container_of(kobj, struct device, kobj); 2921 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2922 u32 data; 2923 u64 qmdata; 2924 int ret; 2925 2926 ret = netxen_sysfs_validate_crb(adapter, offset, size); 2927 if (ret != 0) 2928 return ret; 2929 2930 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && 2931 ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, 2932 NETXEN_PCI_CAMQM_2M_END)) { 2933 memcpy(&qmdata, buf, size); 2934 netxen_pci_camqm_write_2M(adapter, offset, qmdata); 2935 } else { 2936 memcpy(&data, buf, size); 2937 NXWR32(adapter, offset, data); 2938 } 2939 2940 return size; 2941 } 2942 2943 static int 2944 netxen_sysfs_validate_mem(struct netxen_adapter *adapter, 2945 loff_t offset, size_t size) 
2946 { 2947 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) 2948 return -EIO; 2949 2950 if ((size != 8) || (offset & 0x7)) 2951 return -EIO; 2952 2953 return 0; 2954 } 2955 2956 static ssize_t 2957 netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, 2958 struct bin_attribute *attr, 2959 char *buf, loff_t offset, size_t size) 2960 { 2961 struct device *dev = container_of(kobj, struct device, kobj); 2962 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2963 u64 data; 2964 int ret; 2965 2966 ret = netxen_sysfs_validate_mem(adapter, offset, size); 2967 if (ret != 0) 2968 return ret; 2969 2970 if (adapter->pci_mem_read(adapter, offset, &data)) 2971 return -EIO; 2972 2973 memcpy(buf, &data, size); 2974 2975 return size; 2976 } 2977 2978 static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, 2979 struct bin_attribute *attr, char *buf, 2980 loff_t offset, size_t size) 2981 { 2982 struct device *dev = container_of(kobj, struct device, kobj); 2983 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2984 u64 data; 2985 int ret; 2986 2987 ret = netxen_sysfs_validate_mem(adapter, offset, size); 2988 if (ret != 0) 2989 return ret; 2990 2991 memcpy(&data, buf, size); 2992 2993 if (adapter->pci_mem_write(adapter, offset, data)) 2994 return -EIO; 2995 2996 return size; 2997 } 2998 2999 3000 static struct bin_attribute bin_attr_crb = { 3001 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, 3002 .size = 0, 3003 .read = netxen_sysfs_read_crb, 3004 .write = netxen_sysfs_write_crb, 3005 }; 3006 3007 static struct bin_attribute bin_attr_mem = { 3008 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, 3009 .size = 0, 3010 .read = netxen_sysfs_read_mem, 3011 .write = netxen_sysfs_write_mem, 3012 }; 3013 3014 static ssize_t 3015 netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, 3016 struct bin_attribute *attr, 3017 char *buf, loff_t offset, size_t size) 3018 { 3019 struct device *dev = container_of(kobj, struct device, kobj); 3020 struct netxen_adapter *adapter = dev_get_drvdata(dev); 3021 struct net_device *netdev = adapter->netdev; 3022 struct netxen_dimm_cfg dimm; 3023 u8 dw, rows, cols, banks, ranks; 3024 u32 val; 3025 3026 if (size != sizeof(struct netxen_dimm_cfg)) { 3027 netdev_err(netdev, "Invalid size\n"); 3028 return -1; 3029 } 3030 3031 memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); 3032 val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY); 3033 3034 /* Checks if DIMM info is valid. */ 3035 if (val & NETXEN_DIMM_VALID_FLAG) { 3036 netdev_err(netdev, "Invalid DIMM flag\n"); 3037 dimm.presence = 0xff; 3038 goto out; 3039 } 3040 3041 rows = NETXEN_DIMM_NUMROWS(val); 3042 cols = NETXEN_DIMM_NUMCOLS(val); 3043 ranks = NETXEN_DIMM_NUMRANKS(val); 3044 banks = NETXEN_DIMM_NUMBANKS(val); 3045 dw = NETXEN_DIMM_DATAWIDTH(val); 3046 3047 dimm.presence = (val & NETXEN_DIMM_PRESENT); 3048 3049 /* Checks if DIMM info is present. 
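	 * (the NETXEN_DIMM_PRESENT bit of the NETXEN_DIMM_CAPABILITY word read above).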
*/ 3050 if (!dimm.presence) { 3051 netdev_err(netdev, "DIMM not present\n"); 3052 goto out; 3053 } 3054 3055 dimm.dimm_type = NETXEN_DIMM_TYPE(val); 3056 3057 switch (dimm.dimm_type) { 3058 case NETXEN_DIMM_TYPE_RDIMM: 3059 case NETXEN_DIMM_TYPE_UDIMM: 3060 case NETXEN_DIMM_TYPE_SO_DIMM: 3061 case NETXEN_DIMM_TYPE_Micro_DIMM: 3062 case NETXEN_DIMM_TYPE_Mini_RDIMM: 3063 case NETXEN_DIMM_TYPE_Mini_UDIMM: 3064 break; 3065 default: 3066 netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type); 3067 goto out; 3068 } 3069 3070 if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM) 3071 dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM; 3072 else 3073 dimm.mem_type = NETXEN_DIMM_MEMTYPE(val); 3074 3075 if (val & NETXEN_DIMM_SIZE) { 3076 dimm.size = NETXEN_DIMM_STD_MEM_SIZE; 3077 goto out; 3078 } 3079 3080 if (!rows) { 3081 netdev_err(netdev, "Invalid no of rows %x\n", rows); 3082 goto out; 3083 } 3084 3085 if (!cols) { 3086 netdev_err(netdev, "Invalid no of columns %x\n", cols); 3087 goto out; 3088 } 3089 3090 if (!banks) { 3091 netdev_err(netdev, "Invalid no of banks %x\n", banks); 3092 goto out; 3093 } 3094 3095 ranks += 1; 3096 3097 switch (dw) { 3098 case 0x0: 3099 dw = 32; 3100 break; 3101 case 0x1: 3102 dw = 33; 3103 break; 3104 case 0x2: 3105 dw = 36; 3106 break; 3107 case 0x3: 3108 dw = 64; 3109 break; 3110 case 0x4: 3111 dw = 72; 3112 break; 3113 case 0x5: 3114 dw = 80; 3115 break; 3116 case 0x6: 3117 dw = 128; 3118 break; 3119 case 0x7: 3120 dw = 144; 3121 break; 3122 default: 3123 netdev_err(netdev, "Invalid data-width %x\n", dw); 3124 goto out; 3125 } 3126 3127 dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8; 3128 /* Size returned in MB. */ 3129 dimm.size = (dimm.size) / 0x100000; 3130 out: 3131 memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg)); 3132 return sizeof(struct netxen_dimm_cfg); 3133 3134 } 3135 3136 static struct bin_attribute bin_attr_dimm = { 3137 .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, 3138 .size = 0, 3139 .read = netxen_sysfs_read_dimm, 3140 }; 3141 3142 3143 static void 3144 netxen_create_sysfs_entries(struct netxen_adapter *adapter) 3145 { 3146 struct device *dev = &adapter->pdev->dev; 3147 3148 if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { 3149 /* bridged_mode control */ 3150 if (device_create_file(dev, &dev_attr_bridged_mode)) { 3151 dev_warn(dev, 3152 "failed to create bridged_mode sysfs entry\n"); 3153 } 3154 } 3155 } 3156 3157 static void 3158 netxen_remove_sysfs_entries(struct netxen_adapter *adapter) 3159 { 3160 struct device *dev = &adapter->pdev->dev; 3161 3162 if (adapter->capabilities & NX_FW_CAPABILITY_BDG) 3163 device_remove_file(dev, &dev_attr_bridged_mode); 3164 } 3165 3166 static void 3167 netxen_create_diag_entries(struct netxen_adapter *adapter) 3168 { 3169 struct pci_dev *pdev = adapter->pdev; 3170 struct device *dev; 3171 3172 dev = &pdev->dev; 3173 if (device_create_file(dev, &dev_attr_diag_mode)) 3174 dev_info(dev, "failed to create diag_mode sysfs entry\n"); 3175 if (device_create_bin_file(dev, &bin_attr_crb)) 3176 dev_info(dev, "failed to create crb sysfs entry\n"); 3177 if (device_create_bin_file(dev, &bin_attr_mem)) 3178 dev_info(dev, "failed to create mem sysfs entry\n"); 3179 if (device_create_bin_file(dev, &bin_attr_dimm)) 3180 dev_info(dev, "failed to create dimm sysfs entry\n"); 3181 } 3182 3183 3184 static void 3185 netxen_remove_diag_entries(struct netxen_adapter *adapter) 3186 { 3187 struct pci_dev *pdev = adapter->pdev; 3188 struct device *dev = &pdev->dev; 3189 3190 device_remove_file(dev, 
&dev_attr_diag_mode); 3191 device_remove_bin_file(dev, &bin_attr_crb); 3192 device_remove_bin_file(dev, &bin_attr_mem); 3193 device_remove_bin_file(dev, &bin_attr_dimm); 3194 } 3195 3196 #ifdef CONFIG_INET 3197 3198 #define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) 3199 3200 static int 3201 netxen_destip_supported(struct netxen_adapter *adapter) 3202 { 3203 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 3204 return 0; 3205 3206 if (adapter->ahw.cut_through) 3207 return 0; 3208 3209 return 1; 3210 } 3211 3212 static void 3213 netxen_free_ip_list(struct netxen_adapter *adapter, bool master) 3214 { 3215 struct nx_ip_list *cur, *tmp_cur; 3216 3217 list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { 3218 if (master) { 3219 if (cur->master) { 3220 netxen_config_ipaddr(adapter, cur->ip_addr, 3221 NX_IP_DOWN); 3222 list_del(&cur->list); 3223 kfree(cur); 3224 } 3225 } else { 3226 netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); 3227 list_del(&cur->list); 3228 kfree(cur); 3229 } 3230 } 3231 } 3232 3233 static bool 3234 netxen_list_config_ip(struct netxen_adapter *adapter, 3235 struct in_ifaddr *ifa, unsigned long event) 3236 { 3237 struct net_device *dev; 3238 struct nx_ip_list *cur, *tmp_cur; 3239 struct list_head *head; 3240 bool ret = false; 3241 3242 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 3243 3244 if (dev == NULL) 3245 goto out; 3246 3247 switch (event) { 3248 case NX_IP_UP: 3249 list_for_each(head, &adapter->ip_list) { 3250 cur = list_entry(head, struct nx_ip_list, list); 3251 3252 if (cur->ip_addr == ifa->ifa_address) 3253 goto out; 3254 } 3255 3256 cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); 3257 if (cur == NULL) 3258 goto out; 3259 if (dev->priv_flags & IFF_802_1Q_VLAN) 3260 dev = vlan_dev_real_dev(dev); 3261 cur->master = !!netif_is_bond_master(dev); 3262 cur->ip_addr = ifa->ifa_address; 3263 list_add_tail(&cur->list, &adapter->ip_list); 3264 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); 3265 ret = true; 3266 break; 3267 case NX_IP_DOWN: 3268 list_for_each_entry_safe(cur, tmp_cur, 3269 &adapter->ip_list, list) { 3270 if (cur->ip_addr == ifa->ifa_address) { 3271 list_del(&cur->list); 3272 kfree(cur); 3273 netxen_config_ipaddr(adapter, ifa->ifa_address, 3274 NX_IP_DOWN); 3275 ret = true; 3276 break; 3277 } 3278 } 3279 } 3280 out: 3281 return ret; 3282 } 3283 3284 static void 3285 netxen_config_indev_addr(struct netxen_adapter *adapter, 3286 struct net_device *dev, unsigned long event) 3287 { 3288 struct in_device *indev; 3289 3290 if (!netxen_destip_supported(adapter)) 3291 return; 3292 3293 indev = in_dev_get(dev); 3294 if (!indev) 3295 return; 3296 3297 for_ifa(indev) { 3298 switch (event) { 3299 case NETDEV_UP: 3300 netxen_list_config_ip(adapter, ifa, NX_IP_UP); 3301 break; 3302 case NETDEV_DOWN: 3303 netxen_list_config_ip(adapter, ifa, NX_IP_DOWN); 3304 break; 3305 default: 3306 break; 3307 } 3308 } endfor_ifa(indev); 3309 3310 in_dev_put(indev); 3311 } 3312 3313 static void 3314 netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) 3315 3316 { 3317 struct netxen_adapter *adapter = netdev_priv(netdev); 3318 struct nx_ip_list *pos, *tmp_pos; 3319 unsigned long ip_event; 3320 3321 ip_event = (event == NETDEV_UP) ? 
NX_IP_UP : NX_IP_DOWN; 3322 netxen_config_indev_addr(adapter, netdev, event); 3323 3324 list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) { 3325 netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); 3326 } 3327 } 3328 3329 static inline bool 3330 netxen_config_checkdev(struct net_device *dev) 3331 { 3332 struct netxen_adapter *adapter; 3333 3334 if (!is_netxen_netdev(dev)) 3335 return false; 3336 adapter = netdev_priv(dev); 3337 if (!adapter) 3338 return false; 3339 if (!netxen_destip_supported(adapter)) 3340 return false; 3341 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 3342 return false; 3343 3344 return true; 3345 } 3346 3347 /** 3348 * netxen_config_master - configure addresses based on master 3349 * @dev: netxen device 3350 * @event: netdev event 3351 */ 3352 static void netxen_config_master(struct net_device *dev, unsigned long event) 3353 { 3354 struct net_device *master, *slave; 3355 struct netxen_adapter *adapter = netdev_priv(dev); 3356 3357 rcu_read_lock(); 3358 master = netdev_master_upper_dev_get_rcu(dev); 3359 /* 3360 * This is the case where the netxen nic is being 3361 * enslaved and is dev_open()ed in bond_enslave() 3362 * Now we should program the bond's (and its vlans') 3363 * addresses in the netxen NIC. 3364 */ 3365 if (master && netif_is_bond_master(master) && 3366 !netif_is_bond_slave(dev)) { 3367 netxen_config_indev_addr(adapter, master, event); 3368 for_each_netdev_rcu(&init_net, slave) 3369 if (slave->priv_flags & IFF_802_1Q_VLAN && 3370 vlan_dev_real_dev(slave) == master) 3371 netxen_config_indev_addr(adapter, slave, event); 3372 } 3373 rcu_read_unlock(); 3374 /* 3375 * This is the case where the netxen nic is being 3376 * released and is dev_close()ed in bond_release() 3377 * just before IFF_BONDING is stripped. 3378 */ 3379 if (!master && dev->priv_flags & IFF_BONDING) 3380 netxen_free_ip_list(adapter, true); 3381 } 3382 3383 static int netxen_netdev_event(struct notifier_block *this, 3384 unsigned long event, void *ptr) 3385 { 3386 struct netxen_adapter *adapter; 3387 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3388 struct net_device *orig_dev = dev; 3389 struct net_device *slave; 3390 3391 recheck: 3392 if (dev == NULL) 3393 goto done; 3394 3395 if (dev->priv_flags & IFF_802_1Q_VLAN) { 3396 dev = vlan_dev_real_dev(dev); 3397 goto recheck; 3398 } 3399 if (event == NETDEV_UP || event == NETDEV_DOWN) { 3400 /* If this is a bonding device, look for netxen-based slaves*/ 3401 if (netif_is_bond_master(dev)) { 3402 rcu_read_lock(); 3403 for_each_netdev_in_bond_rcu(dev, slave) { 3404 if (!netxen_config_checkdev(slave)) 3405 continue; 3406 adapter = netdev_priv(slave); 3407 netxen_config_indev_addr(adapter, 3408 orig_dev, event); 3409 } 3410 rcu_read_unlock(); 3411 } else { 3412 if (!netxen_config_checkdev(dev)) 3413 goto done; 3414 adapter = netdev_priv(dev); 3415 /* Act only if the actual netxen is the target */ 3416 if (orig_dev == dev) 3417 netxen_config_master(dev, event); 3418 netxen_config_indev_addr(adapter, orig_dev, event); 3419 } 3420 } 3421 done: 3422 return NOTIFY_DONE; 3423 } 3424 3425 static int 3426 netxen_inetaddr_event(struct notifier_block *this, 3427 unsigned long event, void *ptr) 3428 { 3429 struct netxen_adapter *adapter; 3430 struct net_device *dev, *slave; 3431 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 3432 unsigned long ip_event; 3433 3434 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 3435 ip_event = (event == NETDEV_UP) ? 
NX_IP_UP : NX_IP_DOWN; 3436 recheck: 3437 if (dev == NULL) 3438 goto done; 3439 3440 if (dev->priv_flags & IFF_802_1Q_VLAN) { 3441 dev = vlan_dev_real_dev(dev); 3442 goto recheck; 3443 } 3444 if (event == NETDEV_UP || event == NETDEV_DOWN) { 3445 /* If this is a bonding device, look for netxen-based slaves*/ 3446 if (netif_is_bond_master(dev)) { 3447 rcu_read_lock(); 3448 for_each_netdev_in_bond_rcu(dev, slave) { 3449 if (!netxen_config_checkdev(slave)) 3450 continue; 3451 adapter = netdev_priv(slave); 3452 netxen_list_config_ip(adapter, ifa, ip_event); 3453 } 3454 rcu_read_unlock(); 3455 } else { 3456 if (!netxen_config_checkdev(dev)) 3457 goto done; 3458 adapter = netdev_priv(dev); 3459 netxen_list_config_ip(adapter, ifa, ip_event); 3460 } 3461 } 3462 done: 3463 return NOTIFY_DONE; 3464 } 3465 3466 static struct notifier_block netxen_netdev_cb = { 3467 .notifier_call = netxen_netdev_event, 3468 }; 3469 3470 static struct notifier_block netxen_inetaddr_cb = { 3471 .notifier_call = netxen_inetaddr_event, 3472 }; 3473 #else 3474 static void 3475 netxen_restore_indev_addr(struct net_device *dev, unsigned long event) 3476 { } 3477 static void 3478 netxen_free_ip_list(struct netxen_adapter *adapter, bool master) 3479 { } 3480 #endif 3481 3482 static const struct pci_error_handlers netxen_err_handler = { 3483 .error_detected = netxen_io_error_detected, 3484 .slot_reset = netxen_io_slot_reset, 3485 .resume = netxen_io_resume, 3486 }; 3487 3488 static struct pci_driver netxen_driver = { 3489 .name = netxen_nic_driver_name, 3490 .id_table = netxen_pci_tbl, 3491 .probe = netxen_nic_probe, 3492 .remove = netxen_nic_remove, 3493 #ifdef CONFIG_PM 3494 .suspend = netxen_nic_suspend, 3495 .resume = netxen_nic_resume, 3496 #endif 3497 .shutdown = netxen_nic_shutdown, 3498 .err_handler = &netxen_err_handler 3499 }; 3500 3501 static int __init netxen_init_module(void) 3502 { 3503 printk(KERN_INFO "%s\n", netxen_nic_driver_string); 3504 3505 #ifdef CONFIG_INET 3506 register_netdevice_notifier(&netxen_netdev_cb); 3507 register_inetaddr_notifier(&netxen_inetaddr_cb); 3508 #endif 3509 return pci_register_driver(&netxen_driver); 3510 } 3511 3512 module_init(netxen_init_module); 3513 3514 static void __exit netxen_exit_module(void) 3515 { 3516 pci_unregister_driver(&netxen_driver); 3517 3518 #ifdef CONFIG_INET 3519 unregister_inetaddr_notifier(&netxen_inetaddr_cb); 3520 unregister_netdevice_notifier(&netxen_netdev_cb); 3521 #endif 3522 } 3523 3524 module_exit(netxen_exit_module); 3525
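
/*
 * Usage sketch (not part of the driver): the diag entries created by
 * netxen_create_diag_entries() appear under the adapter's PCI device
 * directory in sysfs.  Reads of the "crb" and "mem" nodes fail with -EIO
 * until "1" has been written to "diag_mode".  A minimal user-space reader
 * of the "crb" node, assuming a hypothetical device at 0000:03:00.0 and a
 * caller-supplied CRB offset that satisfies netxen_sysfs_validate_crb()
 * (a 4-byte read at a 4-byte-aligned offset inside CRB space):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		const char *base = "/sys/bus/pci/devices/0000:03:00.0";
 *		char path[128];
 *		uint32_t val;
 *		off_t off;
 *		int fd;
 *
 *		if (argc != 2)
 *			return 1;
 *		off = strtoul(argv[1], NULL, 0);
 *
 *		snprintf(path, sizeof(path), "%s/diag_mode", base);
 *		fd = open(path, O_WRONLY);
 *		if (fd < 0 || write(fd, "1", 1) != 1)
 *			return 1;
 *		close(fd);
 *
 *		snprintf(path, sizeof(path), "%s/crb", base);
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0 || pread(fd, &val, sizeof(val), off) != sizeof(val))
 *			return 1;
 *		printf("crb[0x%llx] = 0x%x\n", (unsigned long long)off, val);
 *		close(fd);
 *		return 0;
 *	}
 */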