1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2005-2013 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/pci.h> 13 #include <linux/netdevice.h> 14 #include <linux/etherdevice.h> 15 #include <linux/delay.h> 16 #include <linux/notifier.h> 17 #include <linux/ip.h> 18 #include <linux/tcp.h> 19 #include <linux/in.h> 20 #include <linux/ethtool.h> 21 #include <linux/topology.h> 22 #include <linux/gfp.h> 23 #include <linux/aer.h> 24 #include <linux/interrupt.h> 25 #include "net_driver.h" 26 #include "efx.h" 27 #include "nic.h" 28 #include "selftest.h" 29 #include "sriov.h" 30 31 #include "mcdi.h" 32 #include "workarounds.h" 33 34 /************************************************************************** 35 * 36 * Type name strings 37 * 38 ************************************************************************** 39 */ 40 41 /* Loopback mode names (see LOOPBACK_MODE()) */ 42 const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; 43 const char *const efx_loopback_mode_names[] = { 44 [LOOPBACK_NONE] = "NONE", 45 [LOOPBACK_DATA] = "DATAPATH", 46 [LOOPBACK_GMAC] = "GMAC", 47 [LOOPBACK_XGMII] = "XGMII", 48 [LOOPBACK_XGXS] = "XGXS", 49 [LOOPBACK_XAUI] = "XAUI", 50 [LOOPBACK_GMII] = "GMII", 51 [LOOPBACK_SGMII] = "SGMII", 52 [LOOPBACK_XGBR] = "XGBR", 53 [LOOPBACK_XFI] = "XFI", 54 [LOOPBACK_XAUI_FAR] = "XAUI_FAR", 55 [LOOPBACK_GMII_FAR] = "GMII_FAR", 56 [LOOPBACK_SGMII_FAR] = "SGMII_FAR", 57 [LOOPBACK_XFI_FAR] = "XFI_FAR", 58 [LOOPBACK_GPHY] = "GPHY", 59 [LOOPBACK_PHYXS] = "PHYXS", 60 [LOOPBACK_PCS] = "PCS", 61 [LOOPBACK_PMAPMD] = "PMA/PMD", 62 [LOOPBACK_XPORT] = "XPORT", 63 [LOOPBACK_XGMII_WS] = "XGMII_WS", 64 [LOOPBACK_XAUI_WS] = "XAUI_WS", 65 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", 66 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", 67 [LOOPBACK_GMII_WS] = "GMII_WS", 68 [LOOPBACK_XFI_WS] = "XFI_WS", 69 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", 70 [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 71 }; 72 73 const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 74 const char *const efx_reset_type_names[] = { 75 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 76 [RESET_TYPE_ALL] = "ALL", 77 [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", 78 [RESET_TYPE_WORLD] = "WORLD", 79 [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", 80 [RESET_TYPE_DATAPATH] = "DATAPATH", 81 [RESET_TYPE_MC_BIST] = "MC_BIST", 82 [RESET_TYPE_DISABLE] = "DISABLE", 83 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 84 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 85 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 86 [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", 87 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 88 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 89 [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", 90 }; 91 92 /* Reset workqueue. If any NIC has a hardware failure then a reset will be 93 * queued onto this work queue. This is not a per-nic work queue, because 94 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 95 */ 96 static struct workqueue_struct *reset_workqueue; 97 98 /* How often and how many times to poll for a reset while waiting for a 99 * BIST that another function started to complete. 100 */ 101 #define BIST_WAIT_DELAY_MS 100 102 #define BIST_WAIT_DELAY_COUNT 100 103 104 /************************************************************************** 105 * 106 * Configurable values 107 * 108 *************************************************************************/ 109 110 /* 111 * Use separate channels for TX and RX events 112 * 113 * Set this to 1 to use separate channels for TX and RX. It allows us 114 * to control interrupt affinity separately for TX and RX. 115 * 116 * This is only used in MSI-X interrupt mode 117 */ 118 static bool separate_tx_channels; 119 module_param(separate_tx_channels, bool, 0444); 120 MODULE_PARM_DESC(separate_tx_channels, 121 "Use separate channels for TX and RX"); 122 123 /* This is the weight assigned to each of the (per-channel) virtual 124 * NAPI devices. 125 */ 126 static int napi_weight = 64; 127 128 /* This is the time (in jiffies) between invocations of the hardware 129 * monitor. 130 * On Falcon-based NICs, this will: 131 * - Check the on-board hardware monitor; 132 * - Poll the link state and reconfigure the hardware as necessary. 133 * On Siena-based NICs for power systems with EEH support, this will give EEH a 134 * chance to start. 135 */ 136 static unsigned int efx_monitor_interval = 1 * HZ; 137 138 /* Initial interrupt moderation settings. They can be modified after 139 * module load with ethtool. 140 * 141 * The default for RX should strike a balance between increasing the 142 * round-trip latency and reducing overhead. 143 */ 144 static unsigned int rx_irq_mod_usec = 60; 145 146 /* Initial interrupt moderation settings. They can be modified after 147 * module load with ethtool. 148 * 149 * This default is chosen to ensure that a 10G link does not go idle 150 * while a TX queue is stopped after it has become full. A queue is 151 * restarted when it drops below half full. The time this takes (assuming 152 * worst case 3 descriptors per packet and 1024 descriptors) is 153 * 512 / 3 * 1.2 = 205 usec. 154 */ 155 static unsigned int tx_irq_mod_usec = 150; 156 157 /* This is the first interrupt mode to try out of: 158 * 0 => MSI-X 159 * 1 => MSI 160 * 2 => legacy 161 */ 162 static unsigned int interrupt_mode; 163 164 /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), 165 * i.e. the number of CPUs among which we may distribute simultaneous 166 * interrupt handling. 167 * 168 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. 169 * The default (0) means to assign an interrupt to each core. 170 */ 171 static unsigned int rss_cpus; 172 module_param(rss_cpus, uint, 0444); 173 MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 174 175 static bool phy_flash_cfg; 176 module_param(phy_flash_cfg, bool, 0644); 177 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); 178 179 static unsigned irq_adapt_low_thresh = 8000; 180 module_param(irq_adapt_low_thresh, uint, 0644); 181 MODULE_PARM_DESC(irq_adapt_low_thresh, 182 "Threshold score for reducing IRQ moderation"); 183 184 static unsigned irq_adapt_high_thresh = 16000; 185 module_param(irq_adapt_high_thresh, uint, 0644); 186 MODULE_PARM_DESC(irq_adapt_high_thresh, 187 "Threshold score for increasing IRQ moderation"); 188 189 static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 190 NETIF_MSG_LINK | NETIF_MSG_IFDOWN | 191 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | 192 NETIF_MSG_TX_ERR | NETIF_MSG_HW); 193 module_param(debug, uint, 0); 194 MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); 195 196 /************************************************************************** 197 * 198 * Utility functions and prototypes 199 * 200 *************************************************************************/ 201 202 static int efx_soft_enable_interrupts(struct efx_nic *efx); 203 static void efx_soft_disable_interrupts(struct efx_nic *efx); 204 static void efx_remove_channel(struct efx_channel *channel); 205 static void efx_remove_channels(struct efx_nic *efx); 206 static const struct efx_channel_type efx_default_channel_type; 207 static void efx_remove_port(struct efx_nic *efx); 208 static void efx_init_napi_channel(struct efx_channel *channel); 209 static void efx_fini_napi(struct efx_nic *efx); 210 static void efx_fini_napi_channel(struct efx_channel *channel); 211 static void efx_fini_struct(struct efx_nic *efx); 212 static void efx_start_all(struct efx_nic *efx); 213 static void efx_stop_all(struct efx_nic *efx); 214 215 #define EFX_ASSERT_RESET_SERIALISED(efx) \ 216 do { \ 217 if ((efx->state == STATE_READY) || \ 218 (efx->state == STATE_RECOVERY) || \ 219 (efx->state == STATE_DISABLED)) \ 220 ASSERT_RTNL(); \ 221 } while (0) 222 223 static int efx_check_disabled(struct efx_nic *efx) 224 { 225 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { 226 netif_err(efx, drv, efx->net_dev, 227 "device is disabled due to earlier errors\n"); 228 return -EIO; 229 } 230 return 0; 231 } 232 233 /************************************************************************** 234 * 235 * Event queue processing 236 * 237 *************************************************************************/ 238 239 /* Process channel's event queue 240 * 241 * This function is responsible for processing the event queue of a 242 * single channel. The caller must guarantee that this function will 243 * never be concurrently called more than once on the same channel, 244 * though different channels may be being processed concurrently. 245 */ 246 static int efx_process_channel(struct efx_channel *channel, int budget) 247 { 248 struct efx_tx_queue *tx_queue; 249 int spent; 250 251 if (unlikely(!channel->enabled)) 252 return 0; 253 254 efx_for_each_channel_tx_queue(tx_queue, channel) { 255 tx_queue->pkts_compl = 0; 256 tx_queue->bytes_compl = 0; 257 } 258 259 spent = efx_nic_process_eventq(channel, budget); 260 if (spent && efx_channel_has_rx_queue(channel)) { 261 struct efx_rx_queue *rx_queue = 262 efx_channel_get_rx_queue(channel); 263 264 efx_rx_flush_packet(channel); 265 efx_fast_push_rx_descriptors(rx_queue, true); 266 } 267 268 /* Update BQL */ 269 efx_for_each_channel_tx_queue(tx_queue, channel) { 270 if (tx_queue->bytes_compl) { 271 netdev_tx_completed_queue(tx_queue->core_txq, 272 tx_queue->pkts_compl, tx_queue->bytes_compl); 273 } 274 } 275 276 return spent; 277 } 278 279 /* NAPI poll handler 280 * 281 * NAPI guarantees serialisation of polls of the same device, which 282 * provides the guarantee required by efx_process_channel(). 283 */ 284 static int efx_poll(struct napi_struct *napi, int budget) 285 { 286 struct efx_channel *channel = 287 container_of(napi, struct efx_channel, napi_str); 288 struct efx_nic *efx = channel->efx; 289 int spent; 290 291 if (!efx_channel_lock_napi(channel)) 292 return budget; 293 294 netif_vdbg(efx, intr, efx->net_dev, 295 "channel %d NAPI poll executing on CPU %d\n", 296 channel->channel, raw_smp_processor_id()); 297 298 spent = efx_process_channel(channel, budget); 299 300 if (spent < budget) { 301 if (efx_channel_has_rx_queue(channel) && 302 efx->irq_rx_adaptive && 303 unlikely(++channel->irq_count == 1000)) { 304 if (unlikely(channel->irq_mod_score < 305 irq_adapt_low_thresh)) { 306 if (channel->irq_moderation > 1) { 307 channel->irq_moderation -= 1; 308 efx->type->push_irq_moderation(channel); 309 } 310 } else if (unlikely(channel->irq_mod_score > 311 irq_adapt_high_thresh)) { 312 if (channel->irq_moderation < 313 efx->irq_rx_moderation) { 314 channel->irq_moderation += 1; 315 efx->type->push_irq_moderation(channel); 316 } 317 } 318 channel->irq_count = 0; 319 channel->irq_mod_score = 0; 320 } 321 322 efx_filter_rfs_expire(channel); 323 324 /* There is no race here; although napi_disable() will 325 * only wait for napi_complete(), this isn't a problem 326 * since efx_nic_eventq_read_ack() will have no effect if 327 * interrupts have already been disabled. 328 */ 329 napi_complete(napi); 330 efx_nic_eventq_read_ack(channel); 331 } 332 333 efx_channel_unlock_napi(channel); 334 return spent; 335 } 336 337 /* Create event queue 338 * Event queue memory allocations are done only once. If the channel 339 * is reset, the memory buffer will be reused; this guards against 340 * errors during channel reset and also simplifies interrupt handling. 341 */ 342 static int efx_probe_eventq(struct efx_channel *channel) 343 { 344 struct efx_nic *efx = channel->efx; 345 unsigned long entries; 346 347 netif_dbg(efx, probe, efx->net_dev, 348 "chan %d create event queue\n", channel->channel); 349 350 /* Build an event queue with room for one event per tx and rx buffer, 351 * plus some extra for link state events and MCDI completions. */ 352 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); 353 EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); 354 channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; 355 356 return efx_nic_probe_eventq(channel); 357 } 358 359 /* Prepare channel's event queue */ 360 static int efx_init_eventq(struct efx_channel *channel) 361 { 362 struct efx_nic *efx = channel->efx; 363 int rc; 364 365 EFX_WARN_ON_PARANOID(channel->eventq_init); 366 367 netif_dbg(efx, drv, efx->net_dev, 368 "chan %d init event queue\n", channel->channel); 369 370 rc = efx_nic_init_eventq(channel); 371 if (rc == 0) { 372 efx->type->push_irq_moderation(channel); 373 channel->eventq_read_ptr = 0; 374 channel->eventq_init = true; 375 } 376 return rc; 377 } 378 379 /* Enable event queue processing and NAPI */ 380 void efx_start_eventq(struct efx_channel *channel) 381 { 382 netif_dbg(channel->efx, ifup, channel->efx->net_dev, 383 "chan %d start event queue\n", channel->channel); 384 385 /* Make sure the NAPI handler sees the enabled flag set */ 386 channel->enabled = true; 387 smp_wmb(); 388 389 efx_channel_enable(channel); 390 napi_enable(&channel->napi_str); 391 efx_nic_eventq_read_ack(channel); 392 } 393 394 /* Disable event queue processing and NAPI */ 395 void efx_stop_eventq(struct efx_channel *channel) 396 { 397 if (!channel->enabled) 398 return; 399 400 napi_disable(&channel->napi_str); 401 while (!efx_channel_disable(channel)) 402 usleep_range(1000, 20000); 403 channel->enabled = false; 404 } 405 406 static void efx_fini_eventq(struct efx_channel *channel) 407 { 408 if (!channel->eventq_init) 409 return; 410 411 netif_dbg(channel->efx, drv, channel->efx->net_dev, 412 "chan %d fini event queue\n", channel->channel); 413 414 efx_nic_fini_eventq(channel); 415 channel->eventq_init = false; 416 } 417 418 static void efx_remove_eventq(struct efx_channel *channel) 419 { 420 netif_dbg(channel->efx, drv, channel->efx->net_dev, 421 "chan %d remove event queue\n", channel->channel); 422 423 efx_nic_remove_eventq(channel); 424 } 425 426 /************************************************************************** 427 * 428 * Channel handling 429 * 430 *************************************************************************/ 431 432 /* Allocate and initialise a channel structure. */ 433 static struct efx_channel * 434 efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) 435 { 436 struct efx_channel *channel; 437 struct efx_rx_queue *rx_queue; 438 struct efx_tx_queue *tx_queue; 439 int j; 440 441 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 442 if (!channel) 443 return NULL; 444 445 channel->efx = efx; 446 channel->channel = i; 447 channel->type = &efx_default_channel_type; 448 449 for (j = 0; j < EFX_TXQ_TYPES; j++) { 450 tx_queue = &channel->tx_queue[j]; 451 tx_queue->efx = efx; 452 tx_queue->queue = i * EFX_TXQ_TYPES + j; 453 tx_queue->channel = channel; 454 } 455 456 rx_queue = &channel->rx_queue; 457 rx_queue->efx = efx; 458 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, 459 (unsigned long)rx_queue); 460 461 return channel; 462 } 463 464 /* Allocate and initialise a channel structure, copying parameters 465 * (but not resources) from an old channel structure. 466 */ 467 static struct efx_channel * 468 efx_copy_channel(const struct efx_channel *old_channel) 469 { 470 struct efx_channel *channel; 471 struct efx_rx_queue *rx_queue; 472 struct efx_tx_queue *tx_queue; 473 int j; 474 475 channel = kmalloc(sizeof(*channel), GFP_KERNEL); 476 if (!channel) 477 return NULL; 478 479 *channel = *old_channel; 480 481 channel->napi_dev = NULL; 482 memset(&channel->eventq, 0, sizeof(channel->eventq)); 483 484 for (j = 0; j < EFX_TXQ_TYPES; j++) { 485 tx_queue = &channel->tx_queue[j]; 486 if (tx_queue->channel) 487 tx_queue->channel = channel; 488 tx_queue->buffer = NULL; 489 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); 490 } 491 492 rx_queue = &channel->rx_queue; 493 rx_queue->buffer = NULL; 494 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); 495 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, 496 (unsigned long)rx_queue); 497 498 return channel; 499 } 500 501 static int efx_probe_channel(struct efx_channel *channel) 502 { 503 struct efx_tx_queue *tx_queue; 504 struct efx_rx_queue *rx_queue; 505 int rc; 506 507 netif_dbg(channel->efx, probe, channel->efx->net_dev, 508 "creating channel %d\n", channel->channel); 509 510 rc = channel->type->pre_probe(channel); 511 if (rc) 512 goto fail; 513 514 rc = efx_probe_eventq(channel); 515 if (rc) 516 goto fail; 517 518 efx_for_each_channel_tx_queue(tx_queue, channel) { 519 rc = efx_probe_tx_queue(tx_queue); 520 if (rc) 521 goto fail; 522 } 523 524 efx_for_each_channel_rx_queue(rx_queue, channel) { 525 rc = efx_probe_rx_queue(rx_queue); 526 if (rc) 527 goto fail; 528 } 529 530 return 0; 531 532 fail: 533 efx_remove_channel(channel); 534 return rc; 535 } 536 537 static void 538 efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) 539 { 540 struct efx_nic *efx = channel->efx; 541 const char *type; 542 int number; 543 544 number = channel->channel; 545 if (efx->tx_channel_offset == 0) { 546 type = ""; 547 } else if (channel->channel < efx->tx_channel_offset) { 548 type = "-rx"; 549 } else { 550 type = "-tx"; 551 number -= efx->tx_channel_offset; 552 } 553 snprintf(buf, len, "%s%s-%d", efx->name, type, number); 554 } 555 556 static void efx_set_channel_names(struct efx_nic *efx) 557 { 558 struct efx_channel *channel; 559 560 efx_for_each_channel(channel, efx) 561 channel->type->get_name(channel, 562 efx->msi_context[channel->channel].name, 563 sizeof(efx->msi_context[0].name)); 564 } 565 566 static int efx_probe_channels(struct efx_nic *efx) 567 { 568 struct efx_channel *channel; 569 int rc; 570 571 /* Restart special buffer allocation */ 572 efx->next_buffer_table = 0; 573 574 /* Probe channels in reverse, so that any 'extra' channels 575 * use the start of the buffer table. This allows the traffic 576 * channels to be resized without moving them or wasting the 577 * entries before them. 578 */ 579 efx_for_each_channel_rev(channel, efx) { 580 rc = efx_probe_channel(channel); 581 if (rc) { 582 netif_err(efx, probe, efx->net_dev, 583 "failed to create channel %d\n", 584 channel->channel); 585 goto fail; 586 } 587 } 588 efx_set_channel_names(efx); 589 590 return 0; 591 592 fail: 593 efx_remove_channels(efx); 594 return rc; 595 } 596 597 /* Channels are shutdown and reinitialised whilst the NIC is running 598 * to propagate configuration changes (mtu, checksum offload), or 599 * to clear hardware error conditions 600 */ 601 static void efx_start_datapath(struct efx_nic *efx) 602 { 603 bool old_rx_scatter = efx->rx_scatter; 604 struct efx_tx_queue *tx_queue; 605 struct efx_rx_queue *rx_queue; 606 struct efx_channel *channel; 607 size_t rx_buf_len; 608 609 /* Calculate the rx buffer allocation parameters required to 610 * support the current MTU, including padding for header 611 * alignment and overruns. 612 */ 613 efx->rx_dma_len = (efx->rx_prefix_size + 614 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 615 efx->type->rx_buffer_padding); 616 rx_buf_len = (sizeof(struct efx_rx_page_state) + 617 efx->rx_ip_align + efx->rx_dma_len); 618 if (rx_buf_len <= PAGE_SIZE) { 619 efx->rx_scatter = efx->type->always_rx_scatter; 620 efx->rx_buffer_order = 0; 621 } else if (efx->type->can_rx_scatter) { 622 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); 623 BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + 624 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, 625 EFX_RX_BUF_ALIGNMENT) > 626 PAGE_SIZE); 627 efx->rx_scatter = true; 628 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; 629 efx->rx_buffer_order = 0; 630 } else { 631 efx->rx_scatter = false; 632 efx->rx_buffer_order = get_order(rx_buf_len); 633 } 634 635 efx_rx_config_page_split(efx); 636 if (efx->rx_buffer_order) 637 netif_dbg(efx, drv, efx->net_dev, 638 "RX buf len=%u; page order=%u batch=%u\n", 639 efx->rx_dma_len, efx->rx_buffer_order, 640 efx->rx_pages_per_batch); 641 else 642 netif_dbg(efx, drv, efx->net_dev, 643 "RX buf len=%u step=%u bpp=%u; page batch=%u\n", 644 efx->rx_dma_len, efx->rx_page_buf_step, 645 efx->rx_bufs_per_page, efx->rx_pages_per_batch); 646 647 /* RX filters may also have scatter-enabled flags */ 648 if (efx->rx_scatter != old_rx_scatter) 649 efx->type->filter_update_rx_scatter(efx); 650 651 /* We must keep at least one descriptor in a TX ring empty. 652 * We could avoid this when the queue size does not exactly 653 * match the hardware ring size, but it's not that important. 654 * Therefore we stop the queue when one more skb might fill 655 * the ring completely. We wake it when half way back to 656 * empty. 657 */ 658 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); 659 efx->txq_wake_thresh = efx->txq_stop_thresh / 2; 660 661 /* Initialise the channels */ 662 efx_for_each_channel(channel, efx) { 663 efx_for_each_channel_tx_queue(tx_queue, channel) { 664 efx_init_tx_queue(tx_queue); 665 atomic_inc(&efx->active_queues); 666 } 667 668 efx_for_each_channel_rx_queue(rx_queue, channel) { 669 efx_init_rx_queue(rx_queue); 670 atomic_inc(&efx->active_queues); 671 efx_stop_eventq(channel); 672 efx_fast_push_rx_descriptors(rx_queue, false); 673 efx_start_eventq(channel); 674 } 675 676 WARN_ON(channel->rx_pkt_n_frags); 677 } 678 679 efx_ptp_start_datapath(efx); 680 681 if (netif_device_present(efx->net_dev)) 682 netif_tx_wake_all_queues(efx->net_dev); 683 } 684 685 static void efx_stop_datapath(struct efx_nic *efx) 686 { 687 struct efx_channel *channel; 688 struct efx_tx_queue *tx_queue; 689 struct efx_rx_queue *rx_queue; 690 int rc; 691 692 EFX_ASSERT_RESET_SERIALISED(efx); 693 BUG_ON(efx->port_enabled); 694 695 efx_ptp_stop_datapath(efx); 696 697 /* Stop RX refill */ 698 efx_for_each_channel(channel, efx) { 699 efx_for_each_channel_rx_queue(rx_queue, channel) 700 rx_queue->refill_enabled = false; 701 } 702 703 efx_for_each_channel(channel, efx) { 704 /* RX packet processing is pipelined, so wait for the 705 * NAPI handler to complete. At least event queue 0 706 * might be kept active by non-data events, so don't 707 * use napi_synchronize() but actually disable NAPI 708 * temporarily. 709 */ 710 if (efx_channel_has_rx_queue(channel)) { 711 efx_stop_eventq(channel); 712 efx_start_eventq(channel); 713 } 714 } 715 716 rc = efx->type->fini_dmaq(efx); 717 if (rc && EFX_WORKAROUND_7803(efx)) { 718 /* Schedule a reset to recover from the flush failure. The 719 * descriptor caches reference memory we're about to free, 720 * but falcon_reconfigure_mac_wrapper() won't reconnect 721 * the MACs because of the pending reset. 722 */ 723 netif_err(efx, drv, efx->net_dev, 724 "Resetting to recover from flush failure\n"); 725 efx_schedule_reset(efx, RESET_TYPE_ALL); 726 } else if (rc) { 727 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); 728 } else { 729 netif_dbg(efx, drv, efx->net_dev, 730 "successfully flushed all queues\n"); 731 } 732 733 efx_for_each_channel(channel, efx) { 734 efx_for_each_channel_rx_queue(rx_queue, channel) 735 efx_fini_rx_queue(rx_queue); 736 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 737 efx_fini_tx_queue(tx_queue); 738 } 739 } 740 741 static void efx_remove_channel(struct efx_channel *channel) 742 { 743 struct efx_tx_queue *tx_queue; 744 struct efx_rx_queue *rx_queue; 745 746 netif_dbg(channel->efx, drv, channel->efx->net_dev, 747 "destroy chan %d\n", channel->channel); 748 749 efx_for_each_channel_rx_queue(rx_queue, channel) 750 efx_remove_rx_queue(rx_queue); 751 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 752 efx_remove_tx_queue(tx_queue); 753 efx_remove_eventq(channel); 754 channel->type->post_remove(channel); 755 } 756 757 static void efx_remove_channels(struct efx_nic *efx) 758 { 759 struct efx_channel *channel; 760 761 efx_for_each_channel(channel, efx) 762 efx_remove_channel(channel); 763 } 764 765 int 766 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) 767 { 768 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 769 u32 old_rxq_entries, old_txq_entries; 770 unsigned i, next_buffer_table = 0; 771 int rc, rc2; 772 773 rc = efx_check_disabled(efx); 774 if (rc) 775 return rc; 776 777 /* Not all channels should be reallocated. We must avoid 778 * reallocating their buffer table entries. 779 */ 780 efx_for_each_channel(channel, efx) { 781 struct efx_rx_queue *rx_queue; 782 struct efx_tx_queue *tx_queue; 783 784 if (channel->type->copy) 785 continue; 786 next_buffer_table = max(next_buffer_table, 787 channel->eventq.index + 788 channel->eventq.entries); 789 efx_for_each_channel_rx_queue(rx_queue, channel) 790 next_buffer_table = max(next_buffer_table, 791 rx_queue->rxd.index + 792 rx_queue->rxd.entries); 793 efx_for_each_channel_tx_queue(tx_queue, channel) 794 next_buffer_table = max(next_buffer_table, 795 tx_queue->txd.index + 796 tx_queue->txd.entries); 797 } 798 799 efx_device_detach_sync(efx); 800 efx_stop_all(efx); 801 efx_soft_disable_interrupts(efx); 802 803 /* Clone channels (where possible) */ 804 memset(other_channel, 0, sizeof(other_channel)); 805 for (i = 0; i < efx->n_channels; i++) { 806 channel = efx->channel[i]; 807 if (channel->type->copy) 808 channel = channel->type->copy(channel); 809 if (!channel) { 810 rc = -ENOMEM; 811 goto out; 812 } 813 other_channel[i] = channel; 814 } 815 816 /* Swap entry counts and channel pointers */ 817 old_rxq_entries = efx->rxq_entries; 818 old_txq_entries = efx->txq_entries; 819 efx->rxq_entries = rxq_entries; 820 efx->txq_entries = txq_entries; 821 for (i = 0; i < efx->n_channels; i++) { 822 channel = efx->channel[i]; 823 efx->channel[i] = other_channel[i]; 824 other_channel[i] = channel; 825 } 826 827 /* Restart buffer table allocation */ 828 efx->next_buffer_table = next_buffer_table; 829 830 for (i = 0; i < efx->n_channels; i++) { 831 channel = efx->channel[i]; 832 if (!channel->type->copy) 833 continue; 834 rc = efx_probe_channel(channel); 835 if (rc) 836 goto rollback; 837 efx_init_napi_channel(efx->channel[i]); 838 } 839 840 out: 841 /* Destroy unused channel structures */ 842 for (i = 0; i < efx->n_channels; i++) { 843 channel = other_channel[i]; 844 if (channel && channel->type->copy) { 845 efx_fini_napi_channel(channel); 846 efx_remove_channel(channel); 847 kfree(channel); 848 } 849 } 850 851 rc2 = efx_soft_enable_interrupts(efx); 852 if (rc2) { 853 rc = rc ? rc : rc2; 854 netif_err(efx, drv, efx->net_dev, 855 "unable to restart interrupts on channel reallocation\n"); 856 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 857 } else { 858 efx_start_all(efx); 859 netif_device_attach(efx->net_dev); 860 } 861 return rc; 862 863 rollback: 864 /* Swap back */ 865 efx->rxq_entries = old_rxq_entries; 866 efx->txq_entries = old_txq_entries; 867 for (i = 0; i < efx->n_channels; i++) { 868 channel = efx->channel[i]; 869 efx->channel[i] = other_channel[i]; 870 other_channel[i] = channel; 871 } 872 goto out; 873 } 874 875 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) 876 { 877 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); 878 } 879 880 static const struct efx_channel_type efx_default_channel_type = { 881 .pre_probe = efx_channel_dummy_op_int, 882 .post_remove = efx_channel_dummy_op_void, 883 .get_name = efx_get_channel_name, 884 .copy = efx_copy_channel, 885 .keep_eventq = false, 886 }; 887 888 int efx_channel_dummy_op_int(struct efx_channel *channel) 889 { 890 return 0; 891 } 892 893 void efx_channel_dummy_op_void(struct efx_channel *channel) 894 { 895 } 896 897 /************************************************************************** 898 * 899 * Port handling 900 * 901 **************************************************************************/ 902 903 /* This ensures that the kernel is kept informed (via 904 * netif_carrier_on/off) of the link status, and also maintains the 905 * link status's stop on the port's TX queue. 906 */ 907 void efx_link_status_changed(struct efx_nic *efx) 908 { 909 struct efx_link_state *link_state = &efx->link_state; 910 911 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 912 * that no events are triggered between unregister_netdev() and the 913 * driver unloading. A more general condition is that NETDEV_CHANGE 914 * can only be generated between NETDEV_UP and NETDEV_DOWN */ 915 if (!netif_running(efx->net_dev)) 916 return; 917 918 if (link_state->up != netif_carrier_ok(efx->net_dev)) { 919 efx->n_link_state_changes++; 920 921 if (link_state->up) 922 netif_carrier_on(efx->net_dev); 923 else 924 netif_carrier_off(efx->net_dev); 925 } 926 927 /* Status message for kernel log */ 928 if (link_state->up) 929 netif_info(efx, link, efx->net_dev, 930 "link up at %uMbps %s-duplex (MTU %d)\n", 931 link_state->speed, link_state->fd ? "full" : "half", 932 efx->net_dev->mtu); 933 else 934 netif_info(efx, link, efx->net_dev, "link down\n"); 935 } 936 937 void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) 938 { 939 efx->link_advertising = advertising; 940 if (advertising) { 941 if (advertising & ADVERTISED_Pause) 942 efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); 943 else 944 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); 945 if (advertising & ADVERTISED_Asym_Pause) 946 efx->wanted_fc ^= EFX_FC_TX; 947 } 948 } 949 950 void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) 951 { 952 efx->wanted_fc = wanted_fc; 953 if (efx->link_advertising) { 954 if (wanted_fc & EFX_FC_RX) 955 efx->link_advertising |= (ADVERTISED_Pause | 956 ADVERTISED_Asym_Pause); 957 else 958 efx->link_advertising &= ~(ADVERTISED_Pause | 959 ADVERTISED_Asym_Pause); 960 if (wanted_fc & EFX_FC_TX) 961 efx->link_advertising ^= ADVERTISED_Asym_Pause; 962 } 963 } 964 965 static void efx_fini_port(struct efx_nic *efx); 966 967 /* We assume that efx->type->reconfigure_mac will always try to sync RX 968 * filters and therefore needs to read-lock the filter table against freeing 969 */ 970 void efx_mac_reconfigure(struct efx_nic *efx) 971 { 972 down_read(&efx->filter_sem); 973 efx->type->reconfigure_mac(efx); 974 up_read(&efx->filter_sem); 975 } 976 977 /* Push loopback/power/transmit disable settings to the PHY, and reconfigure 978 * the MAC appropriately. All other PHY configuration changes are pushed 979 * through phy_op->set_settings(), and pushed asynchronously to the MAC 980 * through efx_monitor(). 981 * 982 * Callers must hold the mac_lock 983 */ 984 int __efx_reconfigure_port(struct efx_nic *efx) 985 { 986 enum efx_phy_mode phy_mode; 987 int rc; 988 989 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 990 991 /* Disable PHY transmit in mac level loopbacks */ 992 phy_mode = efx->phy_mode; 993 if (LOOPBACK_INTERNAL(efx)) 994 efx->phy_mode |= PHY_MODE_TX_DISABLED; 995 else 996 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; 997 998 rc = efx->type->reconfigure_port(efx); 999 1000 if (rc) 1001 efx->phy_mode = phy_mode; 1002 1003 return rc; 1004 } 1005 1006 /* Reinitialise the MAC to pick up new PHY settings, even if the port is 1007 * disabled. */ 1008 int efx_reconfigure_port(struct efx_nic *efx) 1009 { 1010 int rc; 1011 1012 EFX_ASSERT_RESET_SERIALISED(efx); 1013 1014 mutex_lock(&efx->mac_lock); 1015 rc = __efx_reconfigure_port(efx); 1016 mutex_unlock(&efx->mac_lock); 1017 1018 return rc; 1019 } 1020 1021 /* Asynchronous work item for changing MAC promiscuity and multicast 1022 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current 1023 * MAC directly. */ 1024 static void efx_mac_work(struct work_struct *data) 1025 { 1026 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); 1027 1028 mutex_lock(&efx->mac_lock); 1029 if (efx->port_enabled) 1030 efx_mac_reconfigure(efx); 1031 mutex_unlock(&efx->mac_lock); 1032 } 1033 1034 static int efx_probe_port(struct efx_nic *efx) 1035 { 1036 int rc; 1037 1038 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 1039 1040 if (phy_flash_cfg) 1041 efx->phy_mode = PHY_MODE_SPECIAL; 1042 1043 /* Connect up MAC/PHY operations table */ 1044 rc = efx->type->probe_port(efx); 1045 if (rc) 1046 return rc; 1047 1048 /* Initialise MAC address to permanent address */ 1049 ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); 1050 1051 return 0; 1052 } 1053 1054 static int efx_init_port(struct efx_nic *efx) 1055 { 1056 int rc; 1057 1058 netif_dbg(efx, drv, efx->net_dev, "init port\n"); 1059 1060 mutex_lock(&efx->mac_lock); 1061 1062 rc = efx->phy_op->init(efx); 1063 if (rc) 1064 goto fail1; 1065 1066 efx->port_initialized = true; 1067 1068 /* Reconfigure the MAC before creating dma queues (required for 1069 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ 1070 efx_mac_reconfigure(efx); 1071 1072 /* Ensure the PHY advertises the correct flow control settings */ 1073 rc = efx->phy_op->reconfigure(efx); 1074 if (rc && rc != -EPERM) 1075 goto fail2; 1076 1077 mutex_unlock(&efx->mac_lock); 1078 return 0; 1079 1080 fail2: 1081 efx->phy_op->fini(efx); 1082 fail1: 1083 mutex_unlock(&efx->mac_lock); 1084 return rc; 1085 } 1086 1087 static void efx_start_port(struct efx_nic *efx) 1088 { 1089 netif_dbg(efx, ifup, efx->net_dev, "start port\n"); 1090 BUG_ON(efx->port_enabled); 1091 1092 mutex_lock(&efx->mac_lock); 1093 efx->port_enabled = true; 1094 1095 /* Ensure MAC ingress/egress is enabled */ 1096 efx_mac_reconfigure(efx); 1097 1098 mutex_unlock(&efx->mac_lock); 1099 } 1100 1101 /* Cancel work for MAC reconfiguration, periodic hardware monitoring 1102 * and the async self-test, wait for them to finish and prevent them 1103 * being scheduled again. This doesn't cover online resets, which 1104 * should only be cancelled when removing the device. 1105 */ 1106 static void efx_stop_port(struct efx_nic *efx) 1107 { 1108 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); 1109 1110 EFX_ASSERT_RESET_SERIALISED(efx); 1111 1112 mutex_lock(&efx->mac_lock); 1113 efx->port_enabled = false; 1114 mutex_unlock(&efx->mac_lock); 1115 1116 /* Serialise against efx_set_multicast_list() */ 1117 netif_addr_lock_bh(efx->net_dev); 1118 netif_addr_unlock_bh(efx->net_dev); 1119 1120 cancel_delayed_work_sync(&efx->monitor_work); 1121 efx_selftest_async_cancel(efx); 1122 cancel_work_sync(&efx->mac_work); 1123 } 1124 1125 static void efx_fini_port(struct efx_nic *efx) 1126 { 1127 netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); 1128 1129 if (!efx->port_initialized) 1130 return; 1131 1132 efx->phy_op->fini(efx); 1133 efx->port_initialized = false; 1134 1135 efx->link_state.up = false; 1136 efx_link_status_changed(efx); 1137 } 1138 1139 static void efx_remove_port(struct efx_nic *efx) 1140 { 1141 netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); 1142 1143 efx->type->remove_port(efx); 1144 } 1145 1146 /************************************************************************** 1147 * 1148 * NIC handling 1149 * 1150 **************************************************************************/ 1151 1152 static LIST_HEAD(efx_primary_list); 1153 static LIST_HEAD(efx_unassociated_list); 1154 1155 static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right) 1156 { 1157 return left->type == right->type && 1158 left->vpd_sn && right->vpd_sn && 1159 !strcmp(left->vpd_sn, right->vpd_sn); 1160 } 1161 1162 static void efx_associate(struct efx_nic *efx) 1163 { 1164 struct efx_nic *other, *next; 1165 1166 if (efx->primary == efx) { 1167 /* Adding primary function; look for secondaries */ 1168 1169 netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); 1170 list_add_tail(&efx->node, &efx_primary_list); 1171 1172 list_for_each_entry_safe(other, next, &efx_unassociated_list, 1173 node) { 1174 if (efx_same_controller(efx, other)) { 1175 list_del(&other->node); 1176 netif_dbg(other, probe, other->net_dev, 1177 "moving to secondary list of %s %s\n", 1178 pci_name(efx->pci_dev), 1179 efx->net_dev->name); 1180 list_add_tail(&other->node, 1181 &efx->secondary_list); 1182 other->primary = efx; 1183 } 1184 } 1185 } else { 1186 /* Adding secondary function; look for primary */ 1187 1188 list_for_each_entry(other, &efx_primary_list, node) { 1189 if (efx_same_controller(efx, other)) { 1190 netif_dbg(efx, probe, efx->net_dev, 1191 "adding to secondary list of %s %s\n", 1192 pci_name(other->pci_dev), 1193 other->net_dev->name); 1194 list_add_tail(&efx->node, 1195 &other->secondary_list); 1196 efx->primary = other; 1197 return; 1198 } 1199 } 1200 1201 netif_dbg(efx, probe, efx->net_dev, 1202 "adding to unassociated list\n"); 1203 list_add_tail(&efx->node, &efx_unassociated_list); 1204 } 1205 } 1206 1207 static void efx_dissociate(struct efx_nic *efx) 1208 { 1209 struct efx_nic *other, *next; 1210 1211 list_del(&efx->node); 1212 efx->primary = NULL; 1213 1214 list_for_each_entry_safe(other, next, &efx->secondary_list, node) { 1215 list_del(&other->node); 1216 netif_dbg(other, probe, other->net_dev, 1217 "moving to unassociated list\n"); 1218 list_add_tail(&other->node, &efx_unassociated_list); 1219 other->primary = NULL; 1220 } 1221 } 1222 1223 /* This configures the PCI device to enable I/O and DMA. */ 1224 static int efx_init_io(struct efx_nic *efx) 1225 { 1226 struct pci_dev *pci_dev = efx->pci_dev; 1227 dma_addr_t dma_mask = efx->type->max_dma_mask; 1228 unsigned int mem_map_size = efx->type->mem_map_size(efx); 1229 int rc, bar; 1230 1231 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1232 1233 bar = efx->type->mem_bar; 1234 1235 rc = pci_enable_device(pci_dev); 1236 if (rc) { 1237 netif_err(efx, probe, efx->net_dev, 1238 "failed to enable PCI device\n"); 1239 goto fail1; 1240 } 1241 1242 pci_set_master(pci_dev); 1243 1244 /* Set the PCI DMA mask. Try all possibilities from our 1245 * genuine mask down to 32 bits, because some architectures 1246 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit 1247 * masks event though they reject 46 bit masks. 1248 */ 1249 while (dma_mask > 0x7fffffffUL) { 1250 if (dma_supported(&pci_dev->dev, dma_mask)) { 1251 rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); 1252 if (rc == 0) 1253 break; 1254 } 1255 dma_mask >>= 1; 1256 } 1257 if (rc) { 1258 netif_err(efx, probe, efx->net_dev, 1259 "could not find a suitable DMA mask\n"); 1260 goto fail2; 1261 } 1262 netif_dbg(efx, probe, efx->net_dev, 1263 "using DMA mask %llx\n", (unsigned long long) dma_mask); 1264 1265 efx->membase_phys = pci_resource_start(efx->pci_dev, bar); 1266 rc = pci_request_region(pci_dev, bar, "sfc"); 1267 if (rc) { 1268 netif_err(efx, probe, efx->net_dev, 1269 "request for memory BAR failed\n"); 1270 rc = -EIO; 1271 goto fail3; 1272 } 1273 efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); 1274 if (!efx->membase) { 1275 netif_err(efx, probe, efx->net_dev, 1276 "could not map memory BAR at %llx+%x\n", 1277 (unsigned long long)efx->membase_phys, mem_map_size); 1278 rc = -ENOMEM; 1279 goto fail4; 1280 } 1281 netif_dbg(efx, probe, efx->net_dev, 1282 "memory BAR at %llx+%x (virtual %p)\n", 1283 (unsigned long long)efx->membase_phys, mem_map_size, 1284 efx->membase); 1285 1286 return 0; 1287 1288 fail4: 1289 pci_release_region(efx->pci_dev, bar); 1290 fail3: 1291 efx->membase_phys = 0; 1292 fail2: 1293 pci_disable_device(efx->pci_dev); 1294 fail1: 1295 return rc; 1296 } 1297 1298 static void efx_fini_io(struct efx_nic *efx) 1299 { 1300 int bar; 1301 1302 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); 1303 1304 if (efx->membase) { 1305 iounmap(efx->membase); 1306 efx->membase = NULL; 1307 } 1308 1309 if (efx->membase_phys) { 1310 bar = efx->type->mem_bar; 1311 pci_release_region(efx->pci_dev, bar); 1312 efx->membase_phys = 0; 1313 } 1314 1315 /* Don't disable bus-mastering if VFs are assigned */ 1316 if (!pci_vfs_assigned(efx->pci_dev)) 1317 pci_disable_device(efx->pci_dev); 1318 } 1319 1320 void efx_set_default_rx_indir_table(struct efx_nic *efx) 1321 { 1322 size_t i; 1323 1324 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) 1325 efx->rx_indir_table[i] = 1326 ethtool_rxfh_indir_default(i, efx->rss_spread); 1327 } 1328 1329 static unsigned int efx_wanted_parallelism(struct efx_nic *efx) 1330 { 1331 cpumask_var_t thread_mask; 1332 unsigned int count; 1333 int cpu; 1334 1335 if (rss_cpus) { 1336 count = rss_cpus; 1337 } else { 1338 if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) { 1339 netif_warn(efx, probe, efx->net_dev, 1340 "RSS disabled due to allocation failure\n"); 1341 return 1; 1342 } 1343 1344 count = 0; 1345 for_each_online_cpu(cpu) { 1346 if (!cpumask_test_cpu(cpu, thread_mask)) { 1347 ++count; 1348 cpumask_or(thread_mask, thread_mask, 1349 topology_sibling_cpumask(cpu)); 1350 } 1351 } 1352 1353 free_cpumask_var(thread_mask); 1354 } 1355 1356 /* If RSS is requested for the PF *and* VFs then we can't write RSS 1357 * table entries that are inaccessible to VFs 1358 */ 1359 #ifdef CONFIG_SFC_SRIOV 1360 if (efx->type->sriov_wanted) { 1361 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && 1362 count > efx_vf_size(efx)) { 1363 netif_warn(efx, probe, efx->net_dev, 1364 "Reducing number of RSS channels from %u to %u for " 1365 "VF support. Increase vf-msix-limit to use more " 1366 "channels on the PF.\n", 1367 count, efx_vf_size(efx)); 1368 count = efx_vf_size(efx); 1369 } 1370 } 1371 #endif 1372 1373 return count; 1374 } 1375 1376 /* Probe the number and type of interrupts we are able to obtain, and 1377 * the resulting numbers of channels and RX queues. 1378 */ 1379 static int efx_probe_interrupts(struct efx_nic *efx) 1380 { 1381 unsigned int extra_channels = 0; 1382 unsigned int i, j; 1383 int rc; 1384 1385 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) 1386 if (efx->extra_channel_type[i]) 1387 ++extra_channels; 1388 1389 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 1390 struct msix_entry xentries[EFX_MAX_CHANNELS]; 1391 unsigned int n_channels; 1392 1393 n_channels = efx_wanted_parallelism(efx); 1394 if (separate_tx_channels) 1395 n_channels *= 2; 1396 n_channels += extra_channels; 1397 n_channels = min(n_channels, efx->max_channels); 1398 1399 for (i = 0; i < n_channels; i++) 1400 xentries[i].entry = i; 1401 rc = pci_enable_msix_range(efx->pci_dev, 1402 xentries, 1, n_channels); 1403 if (rc < 0) { 1404 /* Fall back to single channel MSI */ 1405 efx->interrupt_mode = EFX_INT_MODE_MSI; 1406 netif_err(efx, drv, efx->net_dev, 1407 "could not enable MSI-X\n"); 1408 } else if (rc < n_channels) { 1409 netif_err(efx, drv, efx->net_dev, 1410 "WARNING: Insufficient MSI-X vectors" 1411 " available (%d < %u).\n", rc, n_channels); 1412 netif_err(efx, drv, efx->net_dev, 1413 "WARNING: Performance may be reduced.\n"); 1414 n_channels = rc; 1415 } 1416 1417 if (rc > 0) { 1418 efx->n_channels = n_channels; 1419 if (n_channels > extra_channels) 1420 n_channels -= extra_channels; 1421 if (separate_tx_channels) { 1422 efx->n_tx_channels = max(n_channels / 2, 1U); 1423 efx->n_rx_channels = max(n_channels - 1424 efx->n_tx_channels, 1425 1U); 1426 } else { 1427 efx->n_tx_channels = n_channels; 1428 efx->n_rx_channels = n_channels; 1429 } 1430 for (i = 0; i < efx->n_channels; i++) 1431 efx_get_channel(efx, i)->irq = 1432 xentries[i].vector; 1433 } 1434 } 1435 1436 /* Try single interrupt MSI */ 1437 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 1438 efx->n_channels = 1; 1439 efx->n_rx_channels = 1; 1440 efx->n_tx_channels = 1; 1441 rc = pci_enable_msi(efx->pci_dev); 1442 if (rc == 0) { 1443 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; 1444 } else { 1445 netif_err(efx, drv, efx->net_dev, 1446 "could not enable MSI\n"); 1447 efx->interrupt_mode = EFX_INT_MODE_LEGACY; 1448 } 1449 } 1450 1451 /* Assume legacy interrupts */ 1452 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 1453 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); 1454 efx->n_rx_channels = 1; 1455 efx->n_tx_channels = 1; 1456 efx->legacy_irq = efx->pci_dev->irq; 1457 } 1458 1459 /* Assign extra channels if possible */ 1460 j = efx->n_channels; 1461 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { 1462 if (!efx->extra_channel_type[i]) 1463 continue; 1464 if (efx->interrupt_mode != EFX_INT_MODE_MSIX || 1465 efx->n_channels <= extra_channels) { 1466 efx->extra_channel_type[i]->handle_no_channel(efx); 1467 } else { 1468 --j; 1469 efx_get_channel(efx, j)->type = 1470 efx->extra_channel_type[i]; 1471 } 1472 } 1473 1474 /* RSS might be usable on VFs even if it is disabled on the PF */ 1475 #ifdef CONFIG_SFC_SRIOV 1476 if (efx->type->sriov_wanted) { 1477 efx->rss_spread = ((efx->n_rx_channels > 1 || 1478 !efx->type->sriov_wanted(efx)) ? 1479 efx->n_rx_channels : efx_vf_size(efx)); 1480 return 0; 1481 } 1482 #endif 1483 efx->rss_spread = efx->n_rx_channels; 1484 1485 return 0; 1486 } 1487 1488 static int efx_soft_enable_interrupts(struct efx_nic *efx) 1489 { 1490 struct efx_channel *channel, *end_channel; 1491 int rc; 1492 1493 BUG_ON(efx->state == STATE_DISABLED); 1494 1495 efx->irq_soft_enabled = true; 1496 smp_wmb(); 1497 1498 efx_for_each_channel(channel, efx) { 1499 if (!channel->type->keep_eventq) { 1500 rc = efx_init_eventq(channel); 1501 if (rc) 1502 goto fail; 1503 } 1504 efx_start_eventq(channel); 1505 } 1506 1507 efx_mcdi_mode_event(efx); 1508 1509 return 0; 1510 fail: 1511 end_channel = channel; 1512 efx_for_each_channel(channel, efx) { 1513 if (channel == end_channel) 1514 break; 1515 efx_stop_eventq(channel); 1516 if (!channel->type->keep_eventq) 1517 efx_fini_eventq(channel); 1518 } 1519 1520 return rc; 1521 } 1522 1523 static void efx_soft_disable_interrupts(struct efx_nic *efx) 1524 { 1525 struct efx_channel *channel; 1526 1527 if (efx->state == STATE_DISABLED) 1528 return; 1529 1530 efx_mcdi_mode_poll(efx); 1531 1532 efx->irq_soft_enabled = false; 1533 smp_wmb(); 1534 1535 if (efx->legacy_irq) 1536 synchronize_irq(efx->legacy_irq); 1537 1538 efx_for_each_channel(channel, efx) { 1539 if (channel->irq) 1540 synchronize_irq(channel->irq); 1541 1542 efx_stop_eventq(channel); 1543 if (!channel->type->keep_eventq) 1544 efx_fini_eventq(channel); 1545 } 1546 1547 /* Flush the asynchronous MCDI request queue */ 1548 efx_mcdi_flush_async(efx); 1549 } 1550 1551 static int efx_enable_interrupts(struct efx_nic *efx) 1552 { 1553 struct efx_channel *channel, *end_channel; 1554 int rc; 1555 1556 BUG_ON(efx->state == STATE_DISABLED); 1557 1558 if (efx->eeh_disabled_legacy_irq) { 1559 enable_irq(efx->legacy_irq); 1560 efx->eeh_disabled_legacy_irq = false; 1561 } 1562 1563 efx->type->irq_enable_master(efx); 1564 1565 efx_for_each_channel(channel, efx) { 1566 if (channel->type->keep_eventq) { 1567 rc = efx_init_eventq(channel); 1568 if (rc) 1569 goto fail; 1570 } 1571 } 1572 1573 rc = efx_soft_enable_interrupts(efx); 1574 if (rc) 1575 goto fail; 1576 1577 return 0; 1578 1579 fail: 1580 end_channel = channel; 1581 efx_for_each_channel(channel, efx) { 1582 if (channel == end_channel) 1583 break; 1584 if (channel->type->keep_eventq) 1585 efx_fini_eventq(channel); 1586 } 1587 1588 efx->type->irq_disable_non_ev(efx); 1589 1590 return rc; 1591 } 1592 1593 static void efx_disable_interrupts(struct efx_nic *efx) 1594 { 1595 struct efx_channel *channel; 1596 1597 efx_soft_disable_interrupts(efx); 1598 1599 efx_for_each_channel(channel, efx) { 1600 if (channel->type->keep_eventq) 1601 efx_fini_eventq(channel); 1602 } 1603 1604 efx->type->irq_disable_non_ev(efx); 1605 } 1606 1607 static void efx_remove_interrupts(struct efx_nic *efx) 1608 { 1609 struct efx_channel *channel; 1610 1611 /* Remove MSI/MSI-X interrupts */ 1612 efx_for_each_channel(channel, efx) 1613 channel->irq = 0; 1614 pci_disable_msi(efx->pci_dev); 1615 pci_disable_msix(efx->pci_dev); 1616 1617 /* Remove legacy interrupt */ 1618 efx->legacy_irq = 0; 1619 } 1620 1621 static void efx_set_channels(struct efx_nic *efx) 1622 { 1623 struct efx_channel *channel; 1624 struct efx_tx_queue *tx_queue; 1625 1626 efx->tx_channel_offset = 1627 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1628 1629 /* We need to mark which channels really have RX and TX 1630 * queues, and adjust the TX queue numbers if we have separate 1631 * RX-only and TX-only channels. 1632 */ 1633 efx_for_each_channel(channel, efx) { 1634 if (channel->channel < efx->n_rx_channels) 1635 channel->rx_queue.core_index = channel->channel; 1636 else 1637 channel->rx_queue.core_index = -1; 1638 1639 efx_for_each_channel_tx_queue(tx_queue, channel) 1640 tx_queue->queue -= (efx->tx_channel_offset * 1641 EFX_TXQ_TYPES); 1642 } 1643 } 1644 1645 static int efx_probe_nic(struct efx_nic *efx) 1646 { 1647 int rc; 1648 1649 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); 1650 1651 /* Carry out hardware-type specific initialisation */ 1652 rc = efx->type->probe(efx); 1653 if (rc) 1654 return rc; 1655 1656 /* Determine the number of channels and queues by trying to hook 1657 * in MSI-X interrupts. */ 1658 rc = efx_probe_interrupts(efx); 1659 if (rc) 1660 goto fail1; 1661 1662 efx_set_channels(efx); 1663 1664 rc = efx->type->dimension_resources(efx); 1665 if (rc) 1666 goto fail2; 1667 1668 if (efx->n_channels > 1) 1669 netdev_rss_key_fill(&efx->rx_hash_key, 1670 sizeof(efx->rx_hash_key)); 1671 efx_set_default_rx_indir_table(efx); 1672 1673 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); 1674 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); 1675 1676 /* Initialise the interrupt moderation settings */ 1677 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, 1678 true); 1679 1680 return 0; 1681 1682 fail2: 1683 efx_remove_interrupts(efx); 1684 fail1: 1685 efx->type->remove(efx); 1686 return rc; 1687 } 1688 1689 static void efx_remove_nic(struct efx_nic *efx) 1690 { 1691 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); 1692 1693 efx_remove_interrupts(efx); 1694 efx->type->remove(efx); 1695 } 1696 1697 static int efx_probe_filters(struct efx_nic *efx) 1698 { 1699 int rc; 1700 1701 spin_lock_init(&efx->filter_lock); 1702 init_rwsem(&efx->filter_sem); 1703 down_write(&efx->filter_sem); 1704 rc = efx->type->filter_table_probe(efx); 1705 if (rc) 1706 goto out_unlock; 1707 1708 #ifdef CONFIG_RFS_ACCEL 1709 if (efx->type->offload_features & NETIF_F_NTUPLE) { 1710 efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, 1711 sizeof(*efx->rps_flow_id), 1712 GFP_KERNEL); 1713 if (!efx->rps_flow_id) { 1714 efx->type->filter_table_remove(efx); 1715 rc = -ENOMEM; 1716 goto out_unlock; 1717 } 1718 } 1719 #endif 1720 out_unlock: 1721 up_write(&efx->filter_sem); 1722 return rc; 1723 } 1724 1725 static void efx_remove_filters(struct efx_nic *efx) 1726 { 1727 #ifdef CONFIG_RFS_ACCEL 1728 kfree(efx->rps_flow_id); 1729 #endif 1730 down_write(&efx->filter_sem); 1731 efx->type->filter_table_remove(efx); 1732 up_write(&efx->filter_sem); 1733 } 1734 1735 static void efx_restore_filters(struct efx_nic *efx) 1736 { 1737 down_read(&efx->filter_sem); 1738 efx->type->filter_table_restore(efx); 1739 up_read(&efx->filter_sem); 1740 } 1741 1742 /************************************************************************** 1743 * 1744 * NIC startup/shutdown 1745 * 1746 *************************************************************************/ 1747 1748 static int efx_probe_all(struct efx_nic *efx) 1749 { 1750 int rc; 1751 1752 rc = efx_probe_nic(efx); 1753 if (rc) { 1754 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); 1755 goto fail1; 1756 } 1757 1758 rc = efx_probe_port(efx); 1759 if (rc) { 1760 netif_err(efx, probe, efx->net_dev, "failed to create port\n"); 1761 goto fail2; 1762 } 1763 1764 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); 1765 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { 1766 rc = -EINVAL; 1767 goto fail3; 1768 } 1769 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; 1770 1771 #ifdef CONFIG_SFC_SRIOV 1772 rc = efx->type->vswitching_probe(efx); 1773 if (rc) /* not fatal; the PF will still work fine */ 1774 netif_warn(efx, probe, efx->net_dev, 1775 "failed to setup vswitching rc=%d;" 1776 " VFs may not function\n", rc); 1777 #endif 1778 1779 rc = efx_probe_filters(efx); 1780 if (rc) { 1781 netif_err(efx, probe, efx->net_dev, 1782 "failed to create filter tables\n"); 1783 goto fail4; 1784 } 1785 1786 rc = efx_probe_channels(efx); 1787 if (rc) 1788 goto fail5; 1789 1790 return 0; 1791 1792 fail5: 1793 efx_remove_filters(efx); 1794 fail4: 1795 #ifdef CONFIG_SFC_SRIOV 1796 efx->type->vswitching_remove(efx); 1797 #endif 1798 fail3: 1799 efx_remove_port(efx); 1800 fail2: 1801 efx_remove_nic(efx); 1802 fail1: 1803 return rc; 1804 } 1805 1806 /* If the interface is supposed to be running but is not, start 1807 * the hardware and software data path, regular activity for the port 1808 * (MAC statistics, link polling, etc.) and schedule the port to be 1809 * reconfigured. Interrupts must already be enabled. This function 1810 * is safe to call multiple times, so long as the NIC is not disabled. 1811 * Requires the RTNL lock. 1812 */ 1813 static void efx_start_all(struct efx_nic *efx) 1814 { 1815 EFX_ASSERT_RESET_SERIALISED(efx); 1816 BUG_ON(efx->state == STATE_DISABLED); 1817 1818 /* Check that it is appropriate to restart the interface. All 1819 * of these flags are safe to read under just the rtnl lock */ 1820 if (efx->port_enabled || !netif_running(efx->net_dev) || 1821 efx->reset_pending) 1822 return; 1823 1824 efx_start_port(efx); 1825 efx_start_datapath(efx); 1826 1827 /* Start the hardware monitor if there is one */ 1828 if (efx->type->monitor != NULL) 1829 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1830 efx_monitor_interval); 1831 1832 /* If link state detection is normally event-driven, we have 1833 * to poll now because we could have missed a change 1834 */ 1835 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1836 mutex_lock(&efx->mac_lock); 1837 if (efx->phy_op->poll(efx)) 1838 efx_link_status_changed(efx); 1839 mutex_unlock(&efx->mac_lock); 1840 } 1841 1842 efx->type->start_stats(efx); 1843 efx->type->pull_stats(efx); 1844 spin_lock_bh(&efx->stats_lock); 1845 efx->type->update_stats(efx, NULL, NULL); 1846 spin_unlock_bh(&efx->stats_lock); 1847 } 1848 1849 /* Quiesce the hardware and software data path, and regular activity 1850 * for the port without bringing the link down. Safe to call multiple 1851 * times with the NIC in almost any state, but interrupts should be 1852 * enabled. Requires the RTNL lock. 1853 */ 1854 static void efx_stop_all(struct efx_nic *efx) 1855 { 1856 EFX_ASSERT_RESET_SERIALISED(efx); 1857 1858 /* port_enabled can be read safely under the rtnl lock */ 1859 if (!efx->port_enabled) 1860 return; 1861 1862 /* update stats before we go down so we can accurately count 1863 * rx_nodesc_drops 1864 */ 1865 efx->type->pull_stats(efx); 1866 spin_lock_bh(&efx->stats_lock); 1867 efx->type->update_stats(efx, NULL, NULL); 1868 spin_unlock_bh(&efx->stats_lock); 1869 efx->type->stop_stats(efx); 1870 efx_stop_port(efx); 1871 1872 /* Stop the kernel transmit interface. This is only valid if 1873 * the device is stopped or detached; otherwise the watchdog 1874 * may fire immediately. 1875 */ 1876 WARN_ON(netif_running(efx->net_dev) && 1877 netif_device_present(efx->net_dev)); 1878 netif_tx_disable(efx->net_dev); 1879 1880 efx_stop_datapath(efx); 1881 } 1882 1883 static void efx_remove_all(struct efx_nic *efx) 1884 { 1885 efx_remove_channels(efx); 1886 efx_remove_filters(efx); 1887 #ifdef CONFIG_SFC_SRIOV 1888 efx->type->vswitching_remove(efx); 1889 #endif 1890 efx_remove_port(efx); 1891 efx_remove_nic(efx); 1892 } 1893 1894 /************************************************************************** 1895 * 1896 * Interrupt moderation 1897 * 1898 **************************************************************************/ 1899 1900 static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns) 1901 { 1902 if (usecs == 0) 1903 return 0; 1904 if (usecs * 1000 < quantum_ns) 1905 return 1; /* never round down to 0 */ 1906 return usecs * 1000 / quantum_ns; 1907 } 1908 1909 /* Set interrupt moderation parameters */ 1910 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, 1911 unsigned int rx_usecs, bool rx_adaptive, 1912 bool rx_may_override_tx) 1913 { 1914 struct efx_channel *channel; 1915 unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max * 1916 efx->timer_quantum_ns, 1917 1000); 1918 unsigned int tx_ticks; 1919 unsigned int rx_ticks; 1920 1921 EFX_ASSERT_RESET_SERIALISED(efx); 1922 1923 if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max) 1924 return -EINVAL; 1925 1926 tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns); 1927 rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns); 1928 1929 if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && 1930 !rx_may_override_tx) { 1931 netif_err(efx, drv, efx->net_dev, "Channels are shared. " 1932 "RX and TX IRQ moderation must be equal\n"); 1933 return -EINVAL; 1934 } 1935 1936 efx->irq_rx_adaptive = rx_adaptive; 1937 efx->irq_rx_moderation = rx_ticks; 1938 efx_for_each_channel(channel, efx) { 1939 if (efx_channel_has_rx_queue(channel)) 1940 channel->irq_moderation = rx_ticks; 1941 else if (efx_channel_has_tx_queues(channel)) 1942 channel->irq_moderation = tx_ticks; 1943 } 1944 1945 return 0; 1946 } 1947 1948 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, 1949 unsigned int *rx_usecs, bool *rx_adaptive) 1950 { 1951 /* We must round up when converting ticks to microseconds 1952 * because we round down when converting the other way. 1953 */ 1954 1955 *rx_adaptive = efx->irq_rx_adaptive; 1956 *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation * 1957 efx->timer_quantum_ns, 1958 1000); 1959 1960 /* If channels are shared between RX and TX, so is IRQ 1961 * moderation. Otherwise, IRQ moderation is the same for all 1962 * TX channels and is not adaptive. 1963 */ 1964 if (efx->tx_channel_offset == 0) 1965 *tx_usecs = *rx_usecs; 1966 else 1967 *tx_usecs = DIV_ROUND_UP( 1968 efx->channel[efx->tx_channel_offset]->irq_moderation * 1969 efx->timer_quantum_ns, 1970 1000); 1971 } 1972 1973 /************************************************************************** 1974 * 1975 * Hardware monitor 1976 * 1977 **************************************************************************/ 1978 1979 /* Run periodically off the general workqueue */ 1980 static void efx_monitor(struct work_struct *data) 1981 { 1982 struct efx_nic *efx = container_of(data, struct efx_nic, 1983 monitor_work.work); 1984 1985 netif_vdbg(efx, timer, efx->net_dev, 1986 "hardware monitor executing on CPU %d\n", 1987 raw_smp_processor_id()); 1988 BUG_ON(efx->type->monitor == NULL); 1989 1990 /* If the mac_lock is already held then it is likely a port 1991 * reconfiguration is already in place, which will likely do 1992 * most of the work of monitor() anyway. */ 1993 if (mutex_trylock(&efx->mac_lock)) { 1994 if (efx->port_enabled) 1995 efx->type->monitor(efx); 1996 mutex_unlock(&efx->mac_lock); 1997 } 1998 1999 queue_delayed_work(efx->workqueue, &efx->monitor_work, 2000 efx_monitor_interval); 2001 } 2002 2003 /************************************************************************** 2004 * 2005 * ioctls 2006 * 2007 *************************************************************************/ 2008 2009 /* Net device ioctl 2010 * Context: process, rtnl_lock() held. 2011 */ 2012 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 2013 { 2014 struct efx_nic *efx = netdev_priv(net_dev); 2015 struct mii_ioctl_data *data = if_mii(ifr); 2016 2017 if (cmd == SIOCSHWTSTAMP) 2018 return efx_ptp_set_ts_config(efx, ifr); 2019 if (cmd == SIOCGHWTSTAMP) 2020 return efx_ptp_get_ts_config(efx, ifr); 2021 2022 /* Convert phy_id from older PRTAD/DEVAD format */ 2023 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 2024 (data->phy_id & 0xfc00) == 0x0400) 2025 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; 2026 2027 return mdio_mii_ioctl(&efx->mdio, data, cmd); 2028 } 2029 2030 /************************************************************************** 2031 * 2032 * NAPI interface 2033 * 2034 **************************************************************************/ 2035 2036 static void efx_init_napi_channel(struct efx_channel *channel) 2037 { 2038 struct efx_nic *efx = channel->efx; 2039 2040 channel->napi_dev = efx->net_dev; 2041 netif_napi_add(channel->napi_dev, &channel->napi_str, 2042 efx_poll, napi_weight); 2043 napi_hash_add(&channel->napi_str); 2044 efx_channel_init_lock(channel); 2045 } 2046 2047 static void efx_init_napi(struct efx_nic *efx) 2048 { 2049 struct efx_channel *channel; 2050 2051 efx_for_each_channel(channel, efx) 2052 efx_init_napi_channel(channel); 2053 } 2054 2055 static void efx_fini_napi_channel(struct efx_channel *channel) 2056 { 2057 if (channel->napi_dev) { 2058 netif_napi_del(&channel->napi_str); 2059 napi_hash_del(&channel->napi_str); 2060 } 2061 channel->napi_dev = NULL; 2062 } 2063 2064 static void efx_fini_napi(struct efx_nic *efx) 2065 { 2066 struct efx_channel *channel; 2067 2068 efx_for_each_channel(channel, efx) 2069 efx_fini_napi_channel(channel); 2070 } 2071 2072 /************************************************************************** 2073 * 2074 * Kernel netpoll interface 2075 * 2076 *************************************************************************/ 2077 2078 #ifdef CONFIG_NET_POLL_CONTROLLER 2079 2080 /* Although in the common case interrupts will be disabled, this is not 2081 * guaranteed. However, all our work happens inside the NAPI callback, 2082 * so no locking is required. 2083 */ 2084 static void efx_netpoll(struct net_device *net_dev) 2085 { 2086 struct efx_nic *efx = netdev_priv(net_dev); 2087 struct efx_channel *channel; 2088 2089 efx_for_each_channel(channel, efx) 2090 efx_schedule_channel(channel); 2091 } 2092 2093 #endif 2094 2095 #ifdef CONFIG_NET_RX_BUSY_POLL 2096 static int efx_busy_poll(struct napi_struct *napi) 2097 { 2098 struct efx_channel *channel = 2099 container_of(napi, struct efx_channel, napi_str); 2100 struct efx_nic *efx = channel->efx; 2101 int budget = 4; 2102 int old_rx_packets, rx_packets; 2103 2104 if (!netif_running(efx->net_dev)) 2105 return LL_FLUSH_FAILED; 2106 2107 if (!efx_channel_lock_poll(channel)) 2108 return LL_FLUSH_BUSY; 2109 2110 old_rx_packets = channel->rx_queue.rx_packets; 2111 efx_process_channel(channel, budget); 2112 2113 rx_packets = channel->rx_queue.rx_packets - old_rx_packets; 2114 2115 /* There is no race condition with NAPI here. 2116 * NAPI will automatically be rescheduled if it yielded during busy 2117 * polling, because it was not able to take the lock and thus returned 2118 * the full budget. 2119 */ 2120 efx_channel_unlock_poll(channel); 2121 2122 return rx_packets; 2123 } 2124 #endif 2125 2126 /************************************************************************** 2127 * 2128 * Kernel net device interface 2129 * 2130 *************************************************************************/ 2131 2132 /* Context: process, rtnl_lock() held. */ 2133 int efx_net_open(struct net_device *net_dev) 2134 { 2135 struct efx_nic *efx = netdev_priv(net_dev); 2136 int rc; 2137 2138 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 2139 raw_smp_processor_id()); 2140 2141 rc = efx_check_disabled(efx); 2142 if (rc) 2143 return rc; 2144 if (efx->phy_mode & PHY_MODE_SPECIAL) 2145 return -EBUSY; 2146 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 2147 return -EIO; 2148 2149 /* Notify the kernel of the link state polled during driver load, 2150 * before the monitor starts running */ 2151 efx_link_status_changed(efx); 2152 2153 efx_start_all(efx); 2154 efx_selftest_async_start(efx); 2155 return 0; 2156 } 2157 2158 /* Context: process, rtnl_lock() held. 2159 * Note that the kernel will ignore our return code; this method 2160 * should really be a void. 2161 */ 2162 int efx_net_stop(struct net_device *net_dev) 2163 { 2164 struct efx_nic *efx = netdev_priv(net_dev); 2165 2166 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 2167 raw_smp_processor_id()); 2168 2169 /* Stop the device and flush all the channels */ 2170 efx_stop_all(efx); 2171 2172 return 0; 2173 } 2174 2175 /* Context: process, dev_base_lock or RTNL held, non-blocking. */ 2176 static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, 2177 struct rtnl_link_stats64 *stats) 2178 { 2179 struct efx_nic *efx = netdev_priv(net_dev); 2180 2181 spin_lock_bh(&efx->stats_lock); 2182 efx->type->update_stats(efx, NULL, stats); 2183 spin_unlock_bh(&efx->stats_lock); 2184 2185 return stats; 2186 } 2187 2188 /* Context: netif_tx_lock held, BHs disabled. */ 2189 static void efx_watchdog(struct net_device *net_dev) 2190 { 2191 struct efx_nic *efx = netdev_priv(net_dev); 2192 2193 netif_err(efx, tx_err, efx->net_dev, 2194 "TX stuck with port_enabled=%d: resetting channels\n", 2195 efx->port_enabled); 2196 2197 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 2198 } 2199 2200 2201 /* Context: process, rtnl_lock() held. */ 2202 static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 2203 { 2204 struct efx_nic *efx = netdev_priv(net_dev); 2205 int rc; 2206 2207 rc = efx_check_disabled(efx); 2208 if (rc) 2209 return rc; 2210 if (new_mtu > EFX_MAX_MTU) 2211 return -EINVAL; 2212 2213 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 2214 2215 efx_device_detach_sync(efx); 2216 efx_stop_all(efx); 2217 2218 mutex_lock(&efx->mac_lock); 2219 net_dev->mtu = new_mtu; 2220 efx_mac_reconfigure(efx); 2221 mutex_unlock(&efx->mac_lock); 2222 2223 efx_start_all(efx); 2224 netif_device_attach(efx->net_dev); 2225 return 0; 2226 } 2227 2228 static int efx_set_mac_address(struct net_device *net_dev, void *data) 2229 { 2230 struct efx_nic *efx = netdev_priv(net_dev); 2231 struct sockaddr *addr = data; 2232 u8 *new_addr = addr->sa_data; 2233 u8 old_addr[6]; 2234 int rc; 2235 2236 if (!is_valid_ether_addr(new_addr)) { 2237 netif_err(efx, drv, efx->net_dev, 2238 "invalid ethernet MAC address requested: %pM\n", 2239 new_addr); 2240 return -EADDRNOTAVAIL; 2241 } 2242 2243 /* save old address */ 2244 ether_addr_copy(old_addr, net_dev->dev_addr); 2245 ether_addr_copy(net_dev->dev_addr, new_addr); 2246 if (efx->type->set_mac_address) { 2247 rc = efx->type->set_mac_address(efx); 2248 if (rc) { 2249 ether_addr_copy(net_dev->dev_addr, old_addr); 2250 return rc; 2251 } 2252 } 2253 2254 /* Reconfigure the MAC */ 2255 mutex_lock(&efx->mac_lock); 2256 efx_mac_reconfigure(efx); 2257 mutex_unlock(&efx->mac_lock); 2258 2259 return 0; 2260 } 2261 2262 /* Context: netif_addr_lock held, BHs disabled. */ 2263 static void efx_set_rx_mode(struct net_device *net_dev) 2264 { 2265 struct efx_nic *efx = netdev_priv(net_dev); 2266 2267 if (efx->port_enabled) 2268 queue_work(efx->workqueue, &efx->mac_work); 2269 /* Otherwise efx_start_port() will do this */ 2270 } 2271 2272 static int efx_set_features(struct net_device *net_dev, netdev_features_t data) 2273 { 2274 struct efx_nic *efx = netdev_priv(net_dev); 2275 2276 /* If disabling RX n-tuple filtering, clear existing filters */ 2277 if (net_dev->features & ~data & NETIF_F_NTUPLE) 2278 return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); 2279 2280 return 0; 2281 } 2282 2283 static const struct net_device_ops efx_netdev_ops = { 2284 .ndo_open = efx_net_open, 2285 .ndo_stop = efx_net_stop, 2286 .ndo_get_stats64 = efx_net_stats, 2287 .ndo_tx_timeout = efx_watchdog, 2288 .ndo_start_xmit = efx_hard_start_xmit, 2289 .ndo_validate_addr = eth_validate_addr, 2290 .ndo_do_ioctl = efx_ioctl, 2291 .ndo_change_mtu = efx_change_mtu, 2292 .ndo_set_mac_address = efx_set_mac_address, 2293 .ndo_set_rx_mode = efx_set_rx_mode, 2294 .ndo_set_features = efx_set_features, 2295 #ifdef CONFIG_SFC_SRIOV 2296 .ndo_set_vf_mac = efx_sriov_set_vf_mac, 2297 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, 2298 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, 2299 .ndo_get_vf_config = efx_sriov_get_vf_config, 2300 .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, 2301 .ndo_get_phys_port_id = efx_sriov_get_phys_port_id, 2302 #endif 2303 #ifdef CONFIG_NET_POLL_CONTROLLER 2304 .ndo_poll_controller = efx_netpoll, 2305 #endif 2306 .ndo_setup_tc = efx_setup_tc, 2307 #ifdef CONFIG_NET_RX_BUSY_POLL 2308 .ndo_busy_poll = efx_busy_poll, 2309 #endif 2310 #ifdef CONFIG_RFS_ACCEL 2311 .ndo_rx_flow_steer = efx_filter_rfs, 2312 #endif 2313 }; 2314 2315 static void efx_update_name(struct efx_nic *efx) 2316 { 2317 strcpy(efx->name, efx->net_dev->name); 2318 efx_mtd_rename(efx); 2319 efx_set_channel_names(efx); 2320 } 2321 2322 static int efx_netdev_event(struct notifier_block *this, 2323 unsigned long event, void *ptr) 2324 { 2325 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 2326 2327 if ((net_dev->netdev_ops == &efx_netdev_ops) && 2328 event == NETDEV_CHANGENAME) 2329 efx_update_name(netdev_priv(net_dev)); 2330 2331 return NOTIFY_DONE; 2332 } 2333 2334 static struct notifier_block efx_netdev_notifier = { 2335 .notifier_call = efx_netdev_event, 2336 }; 2337 2338 static ssize_t 2339 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) 2340 { 2341 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2342 return sprintf(buf, "%d\n", efx->phy_type); 2343 } 2344 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); 2345 2346 #ifdef CONFIG_SFC_MCDI_LOGGING 2347 static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, 2348 char *buf) 2349 { 2350 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2351 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 2352 2353 return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); 2354 } 2355 static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr, 2356 const char *buf, size_t count) 2357 { 2358 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2359 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 2360 bool enable = count > 0 && *buf != '0'; 2361 2362 mcdi->logging_enabled = enable; 2363 return count; 2364 } 2365 static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log); 2366 #endif 2367 2368 static int efx_register_netdev(struct efx_nic *efx) 2369 { 2370 struct net_device *net_dev = efx->net_dev; 2371 struct efx_channel *channel; 2372 int rc; 2373 2374 net_dev->watchdog_timeo = 5 * HZ; 2375 net_dev->irq = efx->pci_dev->irq; 2376 net_dev->netdev_ops = &efx_netdev_ops; 2377 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) 2378 net_dev->priv_flags |= IFF_UNICAST_FLT; 2379 net_dev->ethtool_ops = &efx_ethtool_ops; 2380 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2381 2382 rtnl_lock(); 2383 2384 /* Enable resets to be scheduled and check whether any were 2385 * already requested. If so, the NIC is probably hosed so we 2386 * abort. 2387 */ 2388 efx->state = STATE_READY; 2389 smp_mb(); /* ensure we change state before checking reset_pending */ 2390 if (efx->reset_pending) { 2391 netif_err(efx, probe, efx->net_dev, 2392 "aborting probe due to scheduled reset\n"); 2393 rc = -EIO; 2394 goto fail_locked; 2395 } 2396 2397 rc = dev_alloc_name(net_dev, net_dev->name); 2398 if (rc < 0) 2399 goto fail_locked; 2400 efx_update_name(efx); 2401 2402 /* Always start with carrier off; PHY events will detect the link */ 2403 netif_carrier_off(net_dev); 2404 2405 rc = register_netdevice(net_dev); 2406 if (rc) 2407 goto fail_locked; 2408 2409 efx_for_each_channel(channel, efx) { 2410 struct efx_tx_queue *tx_queue; 2411 efx_for_each_channel_tx_queue(tx_queue, channel) 2412 efx_init_tx_queue_core_txq(tx_queue); 2413 } 2414 2415 efx_associate(efx); 2416 2417 rtnl_unlock(); 2418 2419 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2420 if (rc) { 2421 netif_err(efx, drv, efx->net_dev, 2422 "failed to init net dev attributes\n"); 2423 goto fail_registered; 2424 } 2425 #ifdef CONFIG_SFC_MCDI_LOGGING 2426 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); 2427 if (rc) { 2428 netif_err(efx, drv, efx->net_dev, 2429 "failed to init net dev attributes\n"); 2430 goto fail_attr_mcdi_logging; 2431 } 2432 #endif 2433 2434 return 0; 2435 2436 #ifdef CONFIG_SFC_MCDI_LOGGING 2437 fail_attr_mcdi_logging: 2438 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2439 #endif 2440 fail_registered: 2441 rtnl_lock(); 2442 efx_dissociate(efx); 2443 unregister_netdevice(net_dev); 2444 fail_locked: 2445 efx->state = STATE_UNINIT; 2446 rtnl_unlock(); 2447 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 2448 return rc; 2449 } 2450 2451 static void efx_unregister_netdev(struct efx_nic *efx) 2452 { 2453 if (!efx->net_dev) 2454 return; 2455 2456 BUG_ON(netdev_priv(efx->net_dev) != efx); 2457 2458 if (efx_dev_registered(efx)) { 2459 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2460 #ifdef CONFIG_SFC_MCDI_LOGGING 2461 device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); 2462 #endif 2463 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2464 unregister_netdev(efx->net_dev); 2465 } 2466 } 2467 2468 /************************************************************************** 2469 * 2470 * Device reset and suspend 2471 * 2472 **************************************************************************/ 2473 2474 /* Tears down the entire software state and most of the hardware state 2475 * before reset. */ 2476 void efx_reset_down(struct efx_nic *efx, enum reset_type method) 2477 { 2478 EFX_ASSERT_RESET_SERIALISED(efx); 2479 2480 if (method == RESET_TYPE_MCDI_TIMEOUT) 2481 efx->type->prepare_flr(efx); 2482 2483 efx_stop_all(efx); 2484 efx_disable_interrupts(efx); 2485 2486 mutex_lock(&efx->mac_lock); 2487 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2488 method != RESET_TYPE_DATAPATH) 2489 efx->phy_op->fini(efx); 2490 efx->type->fini(efx); 2491 } 2492 2493 /* This function will always ensure that the locks acquired in 2494 * efx_reset_down() are released. A failure return code indicates 2495 * that we were unable to reinitialise the hardware, and the 2496 * driver should be disabled. If ok is false, then the rx and tx 2497 * engines are not restarted, pending a RESET_DISABLE. */ 2498 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) 2499 { 2500 int rc; 2501 2502 EFX_ASSERT_RESET_SERIALISED(efx); 2503 2504 if (method == RESET_TYPE_MCDI_TIMEOUT) 2505 efx->type->finish_flr(efx); 2506 2507 /* Ensure that SRAM is initialised even if we're disabling the device */ 2508 rc = efx->type->init(efx); 2509 if (rc) { 2510 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); 2511 goto fail; 2512 } 2513 2514 if (!ok) 2515 goto fail; 2516 2517 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2518 method != RESET_TYPE_DATAPATH) { 2519 rc = efx->phy_op->init(efx); 2520 if (rc) 2521 goto fail; 2522 rc = efx->phy_op->reconfigure(efx); 2523 if (rc && rc != -EPERM) 2524 netif_err(efx, drv, efx->net_dev, 2525 "could not restore PHY settings\n"); 2526 } 2527 2528 rc = efx_enable_interrupts(efx); 2529 if (rc) 2530 goto fail; 2531 2532 #ifdef CONFIG_SFC_SRIOV 2533 rc = efx->type->vswitching_restore(efx); 2534 if (rc) /* not fatal; the PF will still work fine */ 2535 netif_warn(efx, probe, efx->net_dev, 2536 "failed to restore vswitching rc=%d;" 2537 " VFs may not function\n", rc); 2538 #endif 2539 2540 down_read(&efx->filter_sem); 2541 efx_restore_filters(efx); 2542 up_read(&efx->filter_sem); 2543 if (efx->type->sriov_reset) 2544 efx->type->sriov_reset(efx); 2545 2546 mutex_unlock(&efx->mac_lock); 2547 2548 efx_start_all(efx); 2549 2550 return 0; 2551 2552 fail: 2553 efx->port_initialized = false; 2554 2555 mutex_unlock(&efx->mac_lock); 2556 2557 return rc; 2558 } 2559 2560 /* Reset the NIC using the specified method. Note that the reset may 2561 * fail, in which case the card will be left in an unusable state. 2562 * 2563 * Caller must hold the rtnl_lock. 2564 */ 2565 int efx_reset(struct efx_nic *efx, enum reset_type method) 2566 { 2567 int rc, rc2; 2568 bool disabled; 2569 2570 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", 2571 RESET_TYPE(method)); 2572 2573 efx_device_detach_sync(efx); 2574 efx_reset_down(efx, method); 2575 2576 rc = efx->type->reset(efx, method); 2577 if (rc) { 2578 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); 2579 goto out; 2580 } 2581 2582 /* Clear flags for the scopes we covered. We assume the NIC and 2583 * driver are now quiescent so that there is no race here. 2584 */ 2585 if (method < RESET_TYPE_MAX_METHOD) 2586 efx->reset_pending &= -(1 << (method + 1)); 2587 else /* it doesn't fit into the well-ordered scope hierarchy */ 2588 __clear_bit(method, &efx->reset_pending); 2589 2590 /* Reinitialise bus-mastering, which may have been turned off before 2591 * the reset was scheduled. This is still appropriate, even in the 2592 * RESET_TYPE_DISABLE since this driver generally assumes the hardware 2593 * can respond to requests. */ 2594 pci_set_master(efx->pci_dev); 2595 2596 out: 2597 /* Leave device stopped if necessary */ 2598 disabled = rc || 2599 method == RESET_TYPE_DISABLE || 2600 method == RESET_TYPE_RECOVER_OR_DISABLE; 2601 rc2 = efx_reset_up(efx, method, !disabled); 2602 if (rc2) { 2603 disabled = true; 2604 if (!rc) 2605 rc = rc2; 2606 } 2607 2608 if (disabled) { 2609 dev_close(efx->net_dev); 2610 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); 2611 efx->state = STATE_DISABLED; 2612 } else { 2613 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); 2614 netif_device_attach(efx->net_dev); 2615 } 2616 return rc; 2617 } 2618 2619 /* Try recovery mechanisms. 2620 * For now only EEH is supported. 2621 * Returns 0 if the recovery mechanisms are unsuccessful. 2622 * Returns a non-zero value otherwise. 2623 */ 2624 int efx_try_recovery(struct efx_nic *efx) 2625 { 2626 #ifdef CONFIG_EEH 2627 /* A PCI error can occur and not be seen by EEH because nothing 2628 * happens on the PCI bus. In this case the driver may fail and 2629 * schedule a 'recover or reset', leading to this recovery handler. 2630 * Manually call the eeh failure check function. 2631 */ 2632 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); 2633 if (eeh_dev_check_failure(eehdev)) { 2634 /* The EEH mechanisms will handle the error and reset the 2635 * device if necessary. 2636 */ 2637 return 1; 2638 } 2639 #endif 2640 return 0; 2641 } 2642 2643 static void efx_wait_for_bist_end(struct efx_nic *efx) 2644 { 2645 int i; 2646 2647 for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { 2648 if (efx_mcdi_poll_reboot(efx)) 2649 goto out; 2650 msleep(BIST_WAIT_DELAY_MS); 2651 } 2652 2653 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); 2654 out: 2655 /* Either way unset the BIST flag. If we found no reboot we probably 2656 * won't recover, but we should try. 2657 */ 2658 efx->mc_bist_for_other_fn = false; 2659 } 2660 2661 /* The worker thread exists so that code that cannot sleep can 2662 * schedule a reset for later. 2663 */ 2664 static void efx_reset_work(struct work_struct *data) 2665 { 2666 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 2667 unsigned long pending; 2668 enum reset_type method; 2669 2670 pending = ACCESS_ONCE(efx->reset_pending); 2671 method = fls(pending) - 1; 2672 2673 if (method == RESET_TYPE_MC_BIST) 2674 efx_wait_for_bist_end(efx); 2675 2676 if ((method == RESET_TYPE_RECOVER_OR_DISABLE || 2677 method == RESET_TYPE_RECOVER_OR_ALL) && 2678 efx_try_recovery(efx)) 2679 return; 2680 2681 if (!pending) 2682 return; 2683 2684 rtnl_lock(); 2685 2686 /* We checked the state in efx_schedule_reset() but it may 2687 * have changed by now. Now that we have the RTNL lock, 2688 * it cannot change again. 2689 */ 2690 if (efx->state == STATE_READY) 2691 (void)efx_reset(efx, method); 2692 2693 rtnl_unlock(); 2694 } 2695 2696 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) 2697 { 2698 enum reset_type method; 2699 2700 if (efx->state == STATE_RECOVERY) { 2701 netif_dbg(efx, drv, efx->net_dev, 2702 "recovering: skip scheduling %s reset\n", 2703 RESET_TYPE(type)); 2704 return; 2705 } 2706 2707 switch (type) { 2708 case RESET_TYPE_INVISIBLE: 2709 case RESET_TYPE_ALL: 2710 case RESET_TYPE_RECOVER_OR_ALL: 2711 case RESET_TYPE_WORLD: 2712 case RESET_TYPE_DISABLE: 2713 case RESET_TYPE_RECOVER_OR_DISABLE: 2714 case RESET_TYPE_DATAPATH: 2715 case RESET_TYPE_MC_BIST: 2716 case RESET_TYPE_MCDI_TIMEOUT: 2717 method = type; 2718 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2719 RESET_TYPE(method)); 2720 break; 2721 default: 2722 method = efx->type->map_reset_reason(type); 2723 netif_dbg(efx, drv, efx->net_dev, 2724 "scheduling %s reset for %s\n", 2725 RESET_TYPE(method), RESET_TYPE(type)); 2726 break; 2727 } 2728 2729 set_bit(method, &efx->reset_pending); 2730 smp_mb(); /* ensure we change reset_pending before checking state */ 2731 2732 /* If we're not READY then just leave the flags set as the cue 2733 * to abort probing or reschedule the reset later. 2734 */ 2735 if (ACCESS_ONCE(efx->state) != STATE_READY) 2736 return; 2737 2738 /* efx_process_channel() will no longer read events once a 2739 * reset is scheduled. So switch back to poll'd MCDI completions. */ 2740 efx_mcdi_mode_poll(efx); 2741 2742 queue_work(reset_workqueue, &efx->reset_work); 2743 } 2744 2745 /************************************************************************** 2746 * 2747 * List of NICs we support 2748 * 2749 **************************************************************************/ 2750 2751 /* PCI device ID table */ 2752 static const struct pci_device_id efx_pci_table[] = { 2753 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2754 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), 2755 .driver_data = (unsigned long) &falcon_a1_nic_type}, 2756 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2757 PCI_DEVICE_ID_SOLARFLARE_SFC4000B), 2758 .driver_data = (unsigned long) &falcon_b0_nic_type}, 2759 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ 2760 .driver_data = (unsigned long) &siena_a0_nic_type}, 2761 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ 2762 .driver_data = (unsigned long) &siena_a0_nic_type}, 2763 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ 2764 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 2765 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ 2766 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 2767 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ 2768 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 2769 {0} /* end of list */ 2770 }; 2771 2772 /************************************************************************** 2773 * 2774 * Dummy PHY/MAC operations 2775 * 2776 * Can be used for some unimplemented operations 2777 * Needed so all function pointers are valid and do not have to be tested 2778 * before use 2779 * 2780 **************************************************************************/ 2781 int efx_port_dummy_op_int(struct efx_nic *efx) 2782 { 2783 return 0; 2784 } 2785 void efx_port_dummy_op_void(struct efx_nic *efx) {} 2786 2787 static bool efx_port_dummy_op_poll(struct efx_nic *efx) 2788 { 2789 return false; 2790 } 2791 2792 static const struct efx_phy_operations efx_dummy_phy_operations = { 2793 .init = efx_port_dummy_op_int, 2794 .reconfigure = efx_port_dummy_op_int, 2795 .poll = efx_port_dummy_op_poll, 2796 .fini = efx_port_dummy_op_void, 2797 }; 2798 2799 /************************************************************************** 2800 * 2801 * Data housekeeping 2802 * 2803 **************************************************************************/ 2804 2805 /* This zeroes out and then fills in the invariants in a struct 2806 * efx_nic (including all sub-structures). 2807 */ 2808 static int efx_init_struct(struct efx_nic *efx, 2809 struct pci_dev *pci_dev, struct net_device *net_dev) 2810 { 2811 int i; 2812 2813 /* Initialise common structures */ 2814 INIT_LIST_HEAD(&efx->node); 2815 INIT_LIST_HEAD(&efx->secondary_list); 2816 spin_lock_init(&efx->biu_lock); 2817 #ifdef CONFIG_SFC_MTD 2818 INIT_LIST_HEAD(&efx->mtd_list); 2819 #endif 2820 INIT_WORK(&efx->reset_work, efx_reset_work); 2821 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 2822 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); 2823 efx->pci_dev = pci_dev; 2824 efx->msg_enable = debug; 2825 efx->state = STATE_UNINIT; 2826 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2827 2828 efx->net_dev = net_dev; 2829 efx->rx_prefix_size = efx->type->rx_prefix_size; 2830 efx->rx_ip_align = 2831 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; 2832 efx->rx_packet_hash_offset = 2833 efx->type->rx_hash_offset - efx->type->rx_prefix_size; 2834 efx->rx_packet_ts_offset = 2835 efx->type->rx_ts_offset - efx->type->rx_prefix_size; 2836 spin_lock_init(&efx->stats_lock); 2837 mutex_init(&efx->mac_lock); 2838 efx->phy_op = &efx_dummy_phy_operations; 2839 efx->mdio.dev = net_dev; 2840 INIT_WORK(&efx->mac_work, efx_mac_work); 2841 init_waitqueue_head(&efx->flush_wq); 2842 2843 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2844 efx->channel[i] = efx_alloc_channel(efx, i, NULL); 2845 if (!efx->channel[i]) 2846 goto fail; 2847 efx->msi_context[i].efx = efx; 2848 efx->msi_context[i].index = i; 2849 } 2850 2851 /* Higher numbered interrupt modes are less capable! */ 2852 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 2853 interrupt_mode); 2854 2855 /* Would be good to use the net_dev name, but we're too early */ 2856 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", 2857 pci_name(pci_dev)); 2858 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); 2859 if (!efx->workqueue) 2860 goto fail; 2861 2862 return 0; 2863 2864 fail: 2865 efx_fini_struct(efx); 2866 return -ENOMEM; 2867 } 2868 2869 static void efx_fini_struct(struct efx_nic *efx) 2870 { 2871 int i; 2872 2873 for (i = 0; i < EFX_MAX_CHANNELS; i++) 2874 kfree(efx->channel[i]); 2875 2876 kfree(efx->vpd_sn); 2877 2878 if (efx->workqueue) { 2879 destroy_workqueue(efx->workqueue); 2880 efx->workqueue = NULL; 2881 } 2882 } 2883 2884 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) 2885 { 2886 u64 n_rx_nodesc_trunc = 0; 2887 struct efx_channel *channel; 2888 2889 efx_for_each_channel(channel, efx) 2890 n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; 2891 stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; 2892 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); 2893 } 2894 2895 /************************************************************************** 2896 * 2897 * PCI interface 2898 * 2899 **************************************************************************/ 2900 2901 /* Main body of final NIC shutdown code 2902 * This is called only at module unload (or hotplug removal). 2903 */ 2904 static void efx_pci_remove_main(struct efx_nic *efx) 2905 { 2906 /* Flush reset_work. It can no longer be scheduled since we 2907 * are not READY. 2908 */ 2909 BUG_ON(efx->state == STATE_READY); 2910 cancel_work_sync(&efx->reset_work); 2911 2912 efx_disable_interrupts(efx); 2913 efx_nic_fini_interrupt(efx); 2914 efx_fini_port(efx); 2915 efx->type->fini(efx); 2916 efx_fini_napi(efx); 2917 efx_remove_all(efx); 2918 } 2919 2920 /* Final NIC shutdown 2921 * This is called only at module unload (or hotplug removal). A PF can call 2922 * this on its VFs to ensure they are unbound first. 2923 */ 2924 static void efx_pci_remove(struct pci_dev *pci_dev) 2925 { 2926 struct efx_nic *efx; 2927 2928 efx = pci_get_drvdata(pci_dev); 2929 if (!efx) 2930 return; 2931 2932 /* Mark the NIC as fini, then stop the interface */ 2933 rtnl_lock(); 2934 efx_dissociate(efx); 2935 dev_close(efx->net_dev); 2936 efx_disable_interrupts(efx); 2937 efx->state = STATE_UNINIT; 2938 rtnl_unlock(); 2939 2940 if (efx->type->sriov_fini) 2941 efx->type->sriov_fini(efx); 2942 2943 efx_unregister_netdev(efx); 2944 2945 efx_mtd_remove(efx); 2946 2947 efx_pci_remove_main(efx); 2948 2949 efx_fini_io(efx); 2950 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 2951 2952 efx_fini_struct(efx); 2953 free_netdev(efx->net_dev); 2954 2955 pci_disable_pcie_error_reporting(pci_dev); 2956 }; 2957 2958 /* NIC VPD information 2959 * Called during probe to display the part number of the 2960 * installed NIC. VPD is potentially very large but this should 2961 * always appear within the first 512 bytes. 2962 */ 2963 #define SFC_VPD_LEN 512 2964 static void efx_probe_vpd_strings(struct efx_nic *efx) 2965 { 2966 struct pci_dev *dev = efx->pci_dev; 2967 char vpd_data[SFC_VPD_LEN]; 2968 ssize_t vpd_size; 2969 int ro_start, ro_size, i, j; 2970 2971 /* Get the vpd data from the device */ 2972 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); 2973 if (vpd_size <= 0) { 2974 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); 2975 return; 2976 } 2977 2978 /* Get the Read only section */ 2979 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); 2980 if (ro_start < 0) { 2981 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); 2982 return; 2983 } 2984 2985 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); 2986 j = ro_size; 2987 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 2988 if (i + j > vpd_size) 2989 j = vpd_size - i; 2990 2991 /* Get the Part number */ 2992 i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); 2993 if (i < 0) { 2994 netif_err(efx, drv, efx->net_dev, "Part number not found\n"); 2995 return; 2996 } 2997 2998 j = pci_vpd_info_field_size(&vpd_data[i]); 2999 i += PCI_VPD_INFO_FLD_HDR_SIZE; 3000 if (i + j > vpd_size) { 3001 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); 3002 return; 3003 } 3004 3005 netif_info(efx, drv, efx->net_dev, 3006 "Part Number : %.*s\n", j, &vpd_data[i]); 3007 3008 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 3009 j = ro_size; 3010 i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); 3011 if (i < 0) { 3012 netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); 3013 return; 3014 } 3015 3016 j = pci_vpd_info_field_size(&vpd_data[i]); 3017 i += PCI_VPD_INFO_FLD_HDR_SIZE; 3018 if (i + j > vpd_size) { 3019 netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); 3020 return; 3021 } 3022 3023 efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); 3024 if (!efx->vpd_sn) 3025 return; 3026 3027 snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); 3028 } 3029 3030 3031 /* Main body of NIC initialisation 3032 * This is called at module load (or hotplug insertion, theoretically). 3033 */ 3034 static int efx_pci_probe_main(struct efx_nic *efx) 3035 { 3036 int rc; 3037 3038 /* Do start-of-day initialisation */ 3039 rc = efx_probe_all(efx); 3040 if (rc) 3041 goto fail1; 3042 3043 efx_init_napi(efx); 3044 3045 rc = efx->type->init(efx); 3046 if (rc) { 3047 netif_err(efx, probe, efx->net_dev, 3048 "failed to initialise NIC\n"); 3049 goto fail3; 3050 } 3051 3052 rc = efx_init_port(efx); 3053 if (rc) { 3054 netif_err(efx, probe, efx->net_dev, 3055 "failed to initialise port\n"); 3056 goto fail4; 3057 } 3058 3059 rc = efx_nic_init_interrupt(efx); 3060 if (rc) 3061 goto fail5; 3062 rc = efx_enable_interrupts(efx); 3063 if (rc) 3064 goto fail6; 3065 3066 return 0; 3067 3068 fail6: 3069 efx_nic_fini_interrupt(efx); 3070 fail5: 3071 efx_fini_port(efx); 3072 fail4: 3073 efx->type->fini(efx); 3074 fail3: 3075 efx_fini_napi(efx); 3076 efx_remove_all(efx); 3077 fail1: 3078 return rc; 3079 } 3080 3081 /* NIC initialisation 3082 * 3083 * This is called at module load (or hotplug insertion, 3084 * theoretically). It sets up PCI mappings, resets the NIC, 3085 * sets up and registers the network devices with the kernel and hooks 3086 * the interrupt service routine. It does not prepare the device for 3087 * transmission; this is left to the first time one of the network 3088 * interfaces is brought up (i.e. efx_net_open). 3089 */ 3090 static int efx_pci_probe(struct pci_dev *pci_dev, 3091 const struct pci_device_id *entry) 3092 { 3093 struct net_device *net_dev; 3094 struct efx_nic *efx; 3095 int rc; 3096 3097 /* Allocate and initialise a struct net_device and struct efx_nic */ 3098 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, 3099 EFX_MAX_RX_QUEUES); 3100 if (!net_dev) 3101 return -ENOMEM; 3102 efx = netdev_priv(net_dev); 3103 efx->type = (const struct efx_nic_type *) entry->driver_data; 3104 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | 3105 NETIF_F_HIGHDMA | NETIF_F_TSO | 3106 NETIF_F_RXCSUM); 3107 if (efx->type->offload_features & NETIF_F_V6_CSUM) 3108 net_dev->features |= NETIF_F_TSO6; 3109 /* Mask for features that also apply to VLAN devices */ 3110 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 3111 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | 3112 NETIF_F_RXCSUM); 3113 /* All offloads can be toggled */ 3114 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; 3115 pci_set_drvdata(pci_dev, efx); 3116 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 3117 rc = efx_init_struct(efx, pci_dev, net_dev); 3118 if (rc) 3119 goto fail1; 3120 3121 netif_info(efx, probe, efx->net_dev, 3122 "Solarflare NIC detected\n"); 3123 3124 if (!efx->type->is_vf) 3125 efx_probe_vpd_strings(efx); 3126 3127 /* Set up basic I/O (BAR mappings etc) */ 3128 rc = efx_init_io(efx); 3129 if (rc) 3130 goto fail2; 3131 3132 rc = efx_pci_probe_main(efx); 3133 if (rc) 3134 goto fail3; 3135 3136 rc = efx_register_netdev(efx); 3137 if (rc) 3138 goto fail4; 3139 3140 if (efx->type->sriov_init) { 3141 rc = efx->type->sriov_init(efx); 3142 if (rc) 3143 netif_err(efx, probe, efx->net_dev, 3144 "SR-IOV can't be enabled rc %d\n", rc); 3145 } 3146 3147 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 3148 3149 /* Try to create MTDs, but allow this to fail */ 3150 rtnl_lock(); 3151 rc = efx_mtd_probe(efx); 3152 rtnl_unlock(); 3153 if (rc) 3154 netif_warn(efx, probe, efx->net_dev, 3155 "failed to create MTDs (%d)\n", rc); 3156 3157 rc = pci_enable_pcie_error_reporting(pci_dev); 3158 if (rc && rc != -EINVAL) 3159 netif_warn(efx, probe, efx->net_dev, 3160 "pci_enable_pcie_error_reporting failed (%d)\n", rc); 3161 3162 return 0; 3163 3164 fail4: 3165 efx_pci_remove_main(efx); 3166 fail3: 3167 efx_fini_io(efx); 3168 fail2: 3169 efx_fini_struct(efx); 3170 fail1: 3171 WARN_ON(rc > 0); 3172 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); 3173 free_netdev(net_dev); 3174 return rc; 3175 } 3176 3177 /* efx_pci_sriov_configure returns the actual number of Virtual Functions 3178 * enabled on success 3179 */ 3180 #ifdef CONFIG_SFC_SRIOV 3181 static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) 3182 { 3183 int rc; 3184 struct efx_nic *efx = pci_get_drvdata(dev); 3185 3186 if (efx->type->sriov_configure) { 3187 rc = efx->type->sriov_configure(efx, num_vfs); 3188 if (rc) 3189 return rc; 3190 else 3191 return num_vfs; 3192 } else 3193 return -EOPNOTSUPP; 3194 } 3195 #endif 3196 3197 static int efx_pm_freeze(struct device *dev) 3198 { 3199 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 3200 3201 rtnl_lock(); 3202 3203 if (efx->state != STATE_DISABLED) { 3204 efx->state = STATE_UNINIT; 3205 3206 efx_device_detach_sync(efx); 3207 3208 efx_stop_all(efx); 3209 efx_disable_interrupts(efx); 3210 } 3211 3212 rtnl_unlock(); 3213 3214 return 0; 3215 } 3216 3217 static int efx_pm_thaw(struct device *dev) 3218 { 3219 int rc; 3220 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 3221 3222 rtnl_lock(); 3223 3224 if (efx->state != STATE_DISABLED) { 3225 rc = efx_enable_interrupts(efx); 3226 if (rc) 3227 goto fail; 3228 3229 mutex_lock(&efx->mac_lock); 3230 efx->phy_op->reconfigure(efx); 3231 mutex_unlock(&efx->mac_lock); 3232 3233 efx_start_all(efx); 3234 3235 netif_device_attach(efx->net_dev); 3236 3237 efx->state = STATE_READY; 3238 3239 efx->type->resume_wol(efx); 3240 } 3241 3242 rtnl_unlock(); 3243 3244 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 3245 queue_work(reset_workqueue, &efx->reset_work); 3246 3247 return 0; 3248 3249 fail: 3250 rtnl_unlock(); 3251 3252 return rc; 3253 } 3254 3255 static int efx_pm_poweroff(struct device *dev) 3256 { 3257 struct pci_dev *pci_dev = to_pci_dev(dev); 3258 struct efx_nic *efx = pci_get_drvdata(pci_dev); 3259 3260 efx->type->fini(efx); 3261 3262 efx->reset_pending = 0; 3263 3264 pci_save_state(pci_dev); 3265 return pci_set_power_state(pci_dev, PCI_D3hot); 3266 } 3267 3268 /* Used for both resume and restore */ 3269 static int efx_pm_resume(struct device *dev) 3270 { 3271 struct pci_dev *pci_dev = to_pci_dev(dev); 3272 struct efx_nic *efx = pci_get_drvdata(pci_dev); 3273 int rc; 3274 3275 rc = pci_set_power_state(pci_dev, PCI_D0); 3276 if (rc) 3277 return rc; 3278 pci_restore_state(pci_dev); 3279 rc = pci_enable_device(pci_dev); 3280 if (rc) 3281 return rc; 3282 pci_set_master(efx->pci_dev); 3283 rc = efx->type->reset(efx, RESET_TYPE_ALL); 3284 if (rc) 3285 return rc; 3286 rc = efx->type->init(efx); 3287 if (rc) 3288 return rc; 3289 rc = efx_pm_thaw(dev); 3290 return rc; 3291 } 3292 3293 static int efx_pm_suspend(struct device *dev) 3294 { 3295 int rc; 3296 3297 efx_pm_freeze(dev); 3298 rc = efx_pm_poweroff(dev); 3299 if (rc) 3300 efx_pm_resume(dev); 3301 return rc; 3302 } 3303 3304 static const struct dev_pm_ops efx_pm_ops = { 3305 .suspend = efx_pm_suspend, 3306 .resume = efx_pm_resume, 3307 .freeze = efx_pm_freeze, 3308 .thaw = efx_pm_thaw, 3309 .poweroff = efx_pm_poweroff, 3310 .restore = efx_pm_resume, 3311 }; 3312 3313 /* A PCI error affecting this device was detected. 3314 * At this point MMIO and DMA may be disabled. 3315 * Stop the software path and request a slot reset. 3316 */ 3317 static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, 3318 enum pci_channel_state state) 3319 { 3320 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3321 struct efx_nic *efx = pci_get_drvdata(pdev); 3322 3323 if (state == pci_channel_io_perm_failure) 3324 return PCI_ERS_RESULT_DISCONNECT; 3325 3326 rtnl_lock(); 3327 3328 if (efx->state != STATE_DISABLED) { 3329 efx->state = STATE_RECOVERY; 3330 efx->reset_pending = 0; 3331 3332 efx_device_detach_sync(efx); 3333 3334 efx_stop_all(efx); 3335 efx_disable_interrupts(efx); 3336 3337 status = PCI_ERS_RESULT_NEED_RESET; 3338 } else { 3339 /* If the interface is disabled we don't want to do anything 3340 * with it. 3341 */ 3342 status = PCI_ERS_RESULT_RECOVERED; 3343 } 3344 3345 rtnl_unlock(); 3346 3347 pci_disable_device(pdev); 3348 3349 return status; 3350 } 3351 3352 /* Fake a successful reset, which will be performed later in efx_io_resume. */ 3353 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) 3354 { 3355 struct efx_nic *efx = pci_get_drvdata(pdev); 3356 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3357 int rc; 3358 3359 if (pci_enable_device(pdev)) { 3360 netif_err(efx, hw, efx->net_dev, 3361 "Cannot re-enable PCI device after reset.\n"); 3362 status = PCI_ERS_RESULT_DISCONNECT; 3363 } 3364 3365 rc = pci_cleanup_aer_uncorrect_error_status(pdev); 3366 if (rc) { 3367 netif_err(efx, hw, efx->net_dev, 3368 "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); 3369 /* Non-fatal error. Continue. */ 3370 } 3371 3372 return status; 3373 } 3374 3375 /* Perform the actual reset and resume I/O operations. */ 3376 static void efx_io_resume(struct pci_dev *pdev) 3377 { 3378 struct efx_nic *efx = pci_get_drvdata(pdev); 3379 int rc; 3380 3381 rtnl_lock(); 3382 3383 if (efx->state == STATE_DISABLED) 3384 goto out; 3385 3386 rc = efx_reset(efx, RESET_TYPE_ALL); 3387 if (rc) { 3388 netif_err(efx, hw, efx->net_dev, 3389 "efx_reset failed after PCI error (%d)\n", rc); 3390 } else { 3391 efx->state = STATE_READY; 3392 netif_dbg(efx, hw, efx->net_dev, 3393 "Done resetting and resuming IO after PCI error.\n"); 3394 } 3395 3396 out: 3397 rtnl_unlock(); 3398 } 3399 3400 /* For simplicity and reliability, we always require a slot reset and try to 3401 * reset the hardware when a pci error affecting the device is detected. 3402 * We leave both the link_reset and mmio_enabled callback unimplemented: 3403 * with our request for slot reset the mmio_enabled callback will never be 3404 * called, and the link_reset callback is not used by AER or EEH mechanisms. 3405 */ 3406 static struct pci_error_handlers efx_err_handlers = { 3407 .error_detected = efx_io_error_detected, 3408 .slot_reset = efx_io_slot_reset, 3409 .resume = efx_io_resume, 3410 }; 3411 3412 static struct pci_driver efx_pci_driver = { 3413 .name = KBUILD_MODNAME, 3414 .id_table = efx_pci_table, 3415 .probe = efx_pci_probe, 3416 .remove = efx_pci_remove, 3417 .driver.pm = &efx_pm_ops, 3418 .err_handler = &efx_err_handlers, 3419 #ifdef CONFIG_SFC_SRIOV 3420 .sriov_configure = efx_pci_sriov_configure, 3421 #endif 3422 }; 3423 3424 /************************************************************************** 3425 * 3426 * Kernel module interface 3427 * 3428 *************************************************************************/ 3429 3430 module_param(interrupt_mode, uint, 0444); 3431 MODULE_PARM_DESC(interrupt_mode, 3432 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 3433 3434 static int __init efx_init_module(void) 3435 { 3436 int rc; 3437 3438 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); 3439 3440 rc = register_netdevice_notifier(&efx_netdev_notifier); 3441 if (rc) 3442 goto err_notifier; 3443 3444 #ifdef CONFIG_SFC_SRIOV 3445 rc = efx_init_sriov(); 3446 if (rc) 3447 goto err_sriov; 3448 #endif 3449 3450 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 3451 if (!reset_workqueue) { 3452 rc = -ENOMEM; 3453 goto err_reset; 3454 } 3455 3456 rc = pci_register_driver(&efx_pci_driver); 3457 if (rc < 0) 3458 goto err_pci; 3459 3460 return 0; 3461 3462 err_pci: 3463 destroy_workqueue(reset_workqueue); 3464 err_reset: 3465 #ifdef CONFIG_SFC_SRIOV 3466 efx_fini_sriov(); 3467 err_sriov: 3468 #endif 3469 unregister_netdevice_notifier(&efx_netdev_notifier); 3470 err_notifier: 3471 return rc; 3472 } 3473 3474 static void __exit efx_exit_module(void) 3475 { 3476 printk(KERN_INFO "Solarflare NET driver unloading\n"); 3477 3478 pci_unregister_driver(&efx_pci_driver); 3479 destroy_workqueue(reset_workqueue); 3480 #ifdef CONFIG_SFC_SRIOV 3481 efx_fini_sriov(); 3482 #endif 3483 unregister_netdevice_notifier(&efx_netdev_notifier); 3484 3485 } 3486 3487 module_init(efx_init_module); 3488 module_exit(efx_exit_module); 3489 3490 MODULE_AUTHOR("Solarflare Communications and " 3491 "Michael Brown <mbrown@fensystems.co.uk>"); 3492 MODULE_DESCRIPTION("Solarflare network driver"); 3493 MODULE_LICENSE("GPL"); 3494 MODULE_DEVICE_TABLE(pci, efx_pci_table); 3495