1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2005-2006 Fen Systems Ltd. 5 * Copyright 2005-2013 Solarflare Communications Inc. 6 */ 7 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 #include <linux/netdevice.h> 11 #include <linux/etherdevice.h> 12 #include <linux/delay.h> 13 #include <linux/notifier.h> 14 #include <linux/ip.h> 15 #include <linux/tcp.h> 16 #include <linux/in.h> 17 #include <linux/ethtool.h> 18 #include <linux/topology.h> 19 #include <linux/gfp.h> 20 #include <linux/aer.h> 21 #include <linux/interrupt.h> 22 #include "net_driver.h" 23 #include "efx.h" 24 #include "nic.h" 25 #include "selftest.h" 26 27 #include "workarounds.h" 28 29 /************************************************************************** 30 * 31 * Type name strings 32 * 33 ************************************************************************** 34 */ 35 36 /* Loopback mode names (see LOOPBACK_MODE()) */ 37 const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX; 38 const char *const ef4_loopback_mode_names[] = { 39 [LOOPBACK_NONE] = "NONE", 40 [LOOPBACK_DATA] = "DATAPATH", 41 [LOOPBACK_GMAC] = "GMAC", 42 [LOOPBACK_XGMII] = "XGMII", 43 [LOOPBACK_XGXS] = "XGXS", 44 [LOOPBACK_XAUI] = "XAUI", 45 [LOOPBACK_GMII] = "GMII", 46 [LOOPBACK_SGMII] = "SGMII", 47 [LOOPBACK_XGBR] = "XGBR", 48 [LOOPBACK_XFI] = "XFI", 49 [LOOPBACK_XAUI_FAR] = "XAUI_FAR", 50 [LOOPBACK_GMII_FAR] = "GMII_FAR", 51 [LOOPBACK_SGMII_FAR] = "SGMII_FAR", 52 [LOOPBACK_XFI_FAR] = "XFI_FAR", 53 [LOOPBACK_GPHY] = "GPHY", 54 [LOOPBACK_PHYXS] = "PHYXS", 55 [LOOPBACK_PCS] = "PCS", 56 [LOOPBACK_PMAPMD] = "PMA/PMD", 57 [LOOPBACK_XPORT] = "XPORT", 58 [LOOPBACK_XGMII_WS] = "XGMII_WS", 59 [LOOPBACK_XAUI_WS] = "XAUI_WS", 60 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", 61 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", 62 [LOOPBACK_GMII_WS] = "GMII_WS", 63 [LOOPBACK_XFI_WS] = "XFI_WS", 64 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", 65 [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 66 }; 67 68 const unsigned int ef4_reset_type_max = RESET_TYPE_MAX; 69 const char *const ef4_reset_type_names[] = { 70 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 71 [RESET_TYPE_ALL] = "ALL", 72 [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", 73 [RESET_TYPE_WORLD] = "WORLD", 74 [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", 75 [RESET_TYPE_DATAPATH] = "DATAPATH", 76 [RESET_TYPE_DISABLE] = "DISABLE", 77 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 78 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 79 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 80 [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", 81 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 82 }; 83 84 /* Reset workqueue. If any NIC has a hardware failure then a reset will be 85 * queued onto this work queue. This is not a per-nic work queue, because 86 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised. 87 */ 88 static struct workqueue_struct *reset_workqueue; 89 90 /* How often and how many times to poll for a reset while waiting for a 91 * BIST that another function started to complete. 92 */ 93 #define BIST_WAIT_DELAY_MS 100 94 #define BIST_WAIT_DELAY_COUNT 100 95 96 /************************************************************************** 97 * 98 * Configurable values 99 * 100 *************************************************************************/ 101 102 /* 103 * Use separate channels for TX and RX events 104 * 105 * Set this to 1 to use separate channels for TX and RX. 
It allows us 106 * to control interrupt affinity separately for TX and RX. 107 * 108 * This is only used in MSI-X interrupt mode 109 */ 110 bool ef4_separate_tx_channels; 111 module_param(ef4_separate_tx_channels, bool, 0444); 112 MODULE_PARM_DESC(ef4_separate_tx_channels, 113 "Use separate channels for TX and RX"); 114 115 /* This is the time (in jiffies) between invocations of the hardware 116 * monitor. 117 * On Falcon-based NICs, this will: 118 * - Check the on-board hardware monitor; 119 * - Poll the link state and reconfigure the hardware as necessary. 120 * On Siena-based NICs for power systems with EEH support, this will give EEH a 121 * chance to start. 122 */ 123 static unsigned int ef4_monitor_interval = 1 * HZ; 124 125 /* Initial interrupt moderation settings. They can be modified after 126 * module load with ethtool. 127 * 128 * The default for RX should strike a balance between increasing the 129 * round-trip latency and reducing overhead. 130 */ 131 static unsigned int rx_irq_mod_usec = 60; 132 133 /* Initial interrupt moderation settings. They can be modified after 134 * module load with ethtool. 135 * 136 * This default is chosen to ensure that a 10G link does not go idle 137 * while a TX queue is stopped after it has become full. A queue is 138 * restarted when it drops below half full. The time this takes (assuming 139 * worst case 3 descriptors per packet and 1024 descriptors) is 140 * 512 / 3 * 1.2 = 205 usec. 141 */ 142 static unsigned int tx_irq_mod_usec = 150; 143 144 /* This is the first interrupt mode to try out of: 145 * 0 => MSI-X 146 * 1 => MSI 147 * 2 => legacy 148 */ 149 static unsigned int interrupt_mode; 150 151 /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), 152 * i.e. the number of CPUs among which we may distribute simultaneous 153 * interrupt handling. 154 * 155 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. 156 * The default (0) means to assign an interrupt to each core. 
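 * Usage note (illustrative): loading the module with rss_cpus=4 requests
 * four RSS queues, while the default of 0 makes ef4_wanted_parallelism()
 * count one queue per physical core, treating hyperthread siblings as a
 * single core.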
157 */ 158 static unsigned int rss_cpus; 159 module_param(rss_cpus, uint, 0444); 160 MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 161 162 static bool phy_flash_cfg; 163 module_param(phy_flash_cfg, bool, 0644); 164 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); 165 166 static unsigned irq_adapt_low_thresh = 8000; 167 module_param(irq_adapt_low_thresh, uint, 0644); 168 MODULE_PARM_DESC(irq_adapt_low_thresh, 169 "Threshold score for reducing IRQ moderation"); 170 171 static unsigned irq_adapt_high_thresh = 16000; 172 module_param(irq_adapt_high_thresh, uint, 0644); 173 MODULE_PARM_DESC(irq_adapt_high_thresh, 174 "Threshold score for increasing IRQ moderation"); 175 176 static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 177 NETIF_MSG_LINK | NETIF_MSG_IFDOWN | 178 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | 179 NETIF_MSG_TX_ERR | NETIF_MSG_HW); 180 module_param(debug, uint, 0); 181 MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); 182 183 /************************************************************************** 184 * 185 * Utility functions and prototypes 186 * 187 *************************************************************************/ 188 189 static int ef4_soft_enable_interrupts(struct ef4_nic *efx); 190 static void ef4_soft_disable_interrupts(struct ef4_nic *efx); 191 static void ef4_remove_channel(struct ef4_channel *channel); 192 static void ef4_remove_channels(struct ef4_nic *efx); 193 static const struct ef4_channel_type ef4_default_channel_type; 194 static void ef4_remove_port(struct ef4_nic *efx); 195 static void ef4_init_napi_channel(struct ef4_channel *channel); 196 static void ef4_fini_napi(struct ef4_nic *efx); 197 static void ef4_fini_napi_channel(struct ef4_channel *channel); 198 static void ef4_fini_struct(struct ef4_nic *efx); 199 static void ef4_start_all(struct ef4_nic *efx); 200 static void ef4_stop_all(struct ef4_nic *efx); 201 202 #define EF4_ASSERT_RESET_SERIALISED(efx) \ 203 do { \ 204 if ((efx->state == STATE_READY) || \ 205 (efx->state == STATE_RECOVERY) || \ 206 (efx->state == STATE_DISABLED)) \ 207 ASSERT_RTNL(); \ 208 } while (0) 209 210 static int ef4_check_disabled(struct ef4_nic *efx) 211 { 212 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { 213 netif_err(efx, drv, efx->net_dev, 214 "device is disabled due to earlier errors\n"); 215 return -EIO; 216 } 217 return 0; 218 } 219 220 /************************************************************************** 221 * 222 * Event queue processing 223 * 224 *************************************************************************/ 225 226 /* Process channel's event queue 227 * 228 * This function is responsible for processing the event queue of a 229 * single channel. The caller must guarantee that this function will 230 * never be concurrently called more than once on the same channel, 231 * though different channels may be being processed concurrently. 
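 * (ef4_poll() relies on NAPI to provide this guarantee, since NAPI
 * serialises polls of the same instance; see the comment above ef4_poll()
 * below.)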
232 */ 233 static int ef4_process_channel(struct ef4_channel *channel, int budget) 234 { 235 struct ef4_tx_queue *tx_queue; 236 int spent; 237 238 if (unlikely(!channel->enabled)) 239 return 0; 240 241 ef4_for_each_channel_tx_queue(tx_queue, channel) { 242 tx_queue->pkts_compl = 0; 243 tx_queue->bytes_compl = 0; 244 } 245 246 spent = ef4_nic_process_eventq(channel, budget); 247 if (spent && ef4_channel_has_rx_queue(channel)) { 248 struct ef4_rx_queue *rx_queue = 249 ef4_channel_get_rx_queue(channel); 250 251 ef4_rx_flush_packet(channel); 252 ef4_fast_push_rx_descriptors(rx_queue, true); 253 } 254 255 /* Update BQL */ 256 ef4_for_each_channel_tx_queue(tx_queue, channel) { 257 if (tx_queue->bytes_compl) { 258 netdev_tx_completed_queue(tx_queue->core_txq, 259 tx_queue->pkts_compl, tx_queue->bytes_compl); 260 } 261 } 262 263 return spent; 264 } 265 266 /* NAPI poll handler 267 * 268 * NAPI guarantees serialisation of polls of the same device, which 269 * provides the guarantee required by ef4_process_channel(). 270 */ 271 static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel) 272 { 273 int step = efx->irq_mod_step_us; 274 275 if (channel->irq_mod_score < irq_adapt_low_thresh) { 276 if (channel->irq_moderation_us > step) { 277 channel->irq_moderation_us -= step; 278 efx->type->push_irq_moderation(channel); 279 } 280 } else if (channel->irq_mod_score > irq_adapt_high_thresh) { 281 if (channel->irq_moderation_us < 282 efx->irq_rx_moderation_us) { 283 channel->irq_moderation_us += step; 284 efx->type->push_irq_moderation(channel); 285 } 286 } 287 288 channel->irq_count = 0; 289 channel->irq_mod_score = 0; 290 } 291 292 static int ef4_poll(struct napi_struct *napi, int budget) 293 { 294 struct ef4_channel *channel = 295 container_of(napi, struct ef4_channel, napi_str); 296 struct ef4_nic *efx = channel->efx; 297 int spent; 298 299 netif_vdbg(efx, intr, efx->net_dev, 300 "channel %d NAPI poll executing on CPU %d\n", 301 channel->channel, raw_smp_processor_id()); 302 303 spent = ef4_process_channel(channel, budget); 304 305 if (spent < budget) { 306 if (ef4_channel_has_rx_queue(channel) && 307 efx->irq_rx_adaptive && 308 unlikely(++channel->irq_count == 1000)) { 309 ef4_update_irq_mod(efx, channel); 310 } 311 312 ef4_filter_rfs_expire(channel); 313 314 /* There is no race here; although napi_disable() will 315 * only wait for napi_complete(), this isn't a problem 316 * since ef4_nic_eventq_read_ack() will have no effect if 317 * interrupts have already been disabled. 318 */ 319 napi_complete_done(napi, spent); 320 ef4_nic_eventq_read_ack(channel); 321 } 322 323 return spent; 324 } 325 326 /* Create event queue 327 * Event queue memory allocations are done only once. If the channel 328 * is reset, the memory buffer will be reused; this guards against 329 * errors during channel reset and also simplifies interrupt handling. 330 */ 331 static int ef4_probe_eventq(struct ef4_channel *channel) 332 { 333 struct ef4_nic *efx = channel->efx; 334 unsigned long entries; 335 336 netif_dbg(efx, probe, efx->net_dev, 337 "chan %d create event queue\n", channel->channel); 338 339 /* Build an event queue with room for one event per tx and rx buffer, 340 * plus some extra for link state events and MCDI completions. 
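 * Worked example (assuming the default 1024-entry RX and TX rings):
 * roundup_pow_of_two(1024 + 1024 + 128) rounds 2176 up to 4096 event
 * queue entries.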
*/ 341 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); 342 EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE); 343 channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1; 344 345 return ef4_nic_probe_eventq(channel); 346 } 347 348 /* Prepare channel's event queue */ 349 static int ef4_init_eventq(struct ef4_channel *channel) 350 { 351 struct ef4_nic *efx = channel->efx; 352 int rc; 353 354 EF4_WARN_ON_PARANOID(channel->eventq_init); 355 356 netif_dbg(efx, drv, efx->net_dev, 357 "chan %d init event queue\n", channel->channel); 358 359 rc = ef4_nic_init_eventq(channel); 360 if (rc == 0) { 361 efx->type->push_irq_moderation(channel); 362 channel->eventq_read_ptr = 0; 363 channel->eventq_init = true; 364 } 365 return rc; 366 } 367 368 /* Enable event queue processing and NAPI */ 369 void ef4_start_eventq(struct ef4_channel *channel) 370 { 371 netif_dbg(channel->efx, ifup, channel->efx->net_dev, 372 "chan %d start event queue\n", channel->channel); 373 374 /* Make sure the NAPI handler sees the enabled flag set */ 375 channel->enabled = true; 376 smp_wmb(); 377 378 napi_enable(&channel->napi_str); 379 ef4_nic_eventq_read_ack(channel); 380 } 381 382 /* Disable event queue processing and NAPI */ 383 void ef4_stop_eventq(struct ef4_channel *channel) 384 { 385 if (!channel->enabled) 386 return; 387 388 napi_disable(&channel->napi_str); 389 channel->enabled = false; 390 } 391 392 static void ef4_fini_eventq(struct ef4_channel *channel) 393 { 394 if (!channel->eventq_init) 395 return; 396 397 netif_dbg(channel->efx, drv, channel->efx->net_dev, 398 "chan %d fini event queue\n", channel->channel); 399 400 ef4_nic_fini_eventq(channel); 401 channel->eventq_init = false; 402 } 403 404 static void ef4_remove_eventq(struct ef4_channel *channel) 405 { 406 netif_dbg(channel->efx, drv, channel->efx->net_dev, 407 "chan %d remove event queue\n", channel->channel); 408 409 ef4_nic_remove_eventq(channel); 410 } 411 412 /************************************************************************** 413 * 414 * Channel handling 415 * 416 *************************************************************************/ 417 418 /* Allocate and initialise a channel structure. */ 419 static struct ef4_channel * 420 ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel) 421 { 422 struct ef4_channel *channel; 423 struct ef4_rx_queue *rx_queue; 424 struct ef4_tx_queue *tx_queue; 425 int j; 426 427 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 428 if (!channel) 429 return NULL; 430 431 channel->efx = efx; 432 channel->channel = i; 433 channel->type = &ef4_default_channel_type; 434 435 for (j = 0; j < EF4_TXQ_TYPES; j++) { 436 tx_queue = &channel->tx_queue[j]; 437 tx_queue->efx = efx; 438 tx_queue->queue = i * EF4_TXQ_TYPES + j; 439 tx_queue->channel = channel; 440 } 441 442 rx_queue = &channel->rx_queue; 443 rx_queue->efx = efx; 444 timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0); 445 446 return channel; 447 } 448 449 /* Allocate and initialise a channel structure, copying parameters 450 * (but not resources) from an old channel structure. 
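 * (This is the ->copy hook of ef4_default_channel_type and is used by
 * ef4_realloc_channels() when the descriptor ring sizes are changed.)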
451 */ 452 static struct ef4_channel * 453 ef4_copy_channel(const struct ef4_channel *old_channel) 454 { 455 struct ef4_channel *channel; 456 struct ef4_rx_queue *rx_queue; 457 struct ef4_tx_queue *tx_queue; 458 int j; 459 460 channel = kmalloc(sizeof(*channel), GFP_KERNEL); 461 if (!channel) 462 return NULL; 463 464 *channel = *old_channel; 465 466 channel->napi_dev = NULL; 467 INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); 468 channel->napi_str.napi_id = 0; 469 channel->napi_str.state = 0; 470 memset(&channel->eventq, 0, sizeof(channel->eventq)); 471 472 for (j = 0; j < EF4_TXQ_TYPES; j++) { 473 tx_queue = &channel->tx_queue[j]; 474 if (tx_queue->channel) 475 tx_queue->channel = channel; 476 tx_queue->buffer = NULL; 477 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); 478 } 479 480 rx_queue = &channel->rx_queue; 481 rx_queue->buffer = NULL; 482 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); 483 timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0); 484 485 return channel; 486 } 487 488 static int ef4_probe_channel(struct ef4_channel *channel) 489 { 490 struct ef4_tx_queue *tx_queue; 491 struct ef4_rx_queue *rx_queue; 492 int rc; 493 494 netif_dbg(channel->efx, probe, channel->efx->net_dev, 495 "creating channel %d\n", channel->channel); 496 497 rc = channel->type->pre_probe(channel); 498 if (rc) 499 goto fail; 500 501 rc = ef4_probe_eventq(channel); 502 if (rc) 503 goto fail; 504 505 ef4_for_each_channel_tx_queue(tx_queue, channel) { 506 rc = ef4_probe_tx_queue(tx_queue); 507 if (rc) 508 goto fail; 509 } 510 511 ef4_for_each_channel_rx_queue(rx_queue, channel) { 512 rc = ef4_probe_rx_queue(rx_queue); 513 if (rc) 514 goto fail; 515 } 516 517 return 0; 518 519 fail: 520 ef4_remove_channel(channel); 521 return rc; 522 } 523 524 static void 525 ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len) 526 { 527 struct ef4_nic *efx = channel->efx; 528 const char *type; 529 int number; 530 531 number = channel->channel; 532 if (efx->tx_channel_offset == 0) { 533 type = ""; 534 } else if (channel->channel < efx->tx_channel_offset) { 535 type = "-rx"; 536 } else { 537 type = "-tx"; 538 number -= efx->tx_channel_offset; 539 } 540 snprintf(buf, len, "%s%s-%d", efx->name, type, number); 541 } 542 543 static void ef4_set_channel_names(struct ef4_nic *efx) 544 { 545 struct ef4_channel *channel; 546 547 ef4_for_each_channel(channel, efx) 548 channel->type->get_name(channel, 549 efx->msi_context[channel->channel].name, 550 sizeof(efx->msi_context[0].name)); 551 } 552 553 static int ef4_probe_channels(struct ef4_nic *efx) 554 { 555 struct ef4_channel *channel; 556 int rc; 557 558 /* Restart special buffer allocation */ 559 efx->next_buffer_table = 0; 560 561 /* Probe channels in reverse, so that any 'extra' channels 562 * use the start of the buffer table. This allows the traffic 563 * channels to be resized without moving them or wasting the 564 * entries before them. 
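 * (Extra channels are given the highest channel numbers in
 * ef4_probe_interrupts(), so iterating in reverse probes them first and
 * they take the lowest buffer table entries.)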
565 */ 566 ef4_for_each_channel_rev(channel, efx) { 567 rc = ef4_probe_channel(channel); 568 if (rc) { 569 netif_err(efx, probe, efx->net_dev, 570 "failed to create channel %d\n", 571 channel->channel); 572 goto fail; 573 } 574 } 575 ef4_set_channel_names(efx); 576 577 return 0; 578 579 fail: 580 ef4_remove_channels(efx); 581 return rc; 582 } 583 584 /* Channels are shutdown and reinitialised whilst the NIC is running 585 * to propagate configuration changes (mtu, checksum offload), or 586 * to clear hardware error conditions 587 */ 588 static void ef4_start_datapath(struct ef4_nic *efx) 589 { 590 netdev_features_t old_features = efx->net_dev->features; 591 bool old_rx_scatter = efx->rx_scatter; 592 struct ef4_tx_queue *tx_queue; 593 struct ef4_rx_queue *rx_queue; 594 struct ef4_channel *channel; 595 size_t rx_buf_len; 596 597 /* Calculate the rx buffer allocation parameters required to 598 * support the current MTU, including padding for header 599 * alignment and overruns. 600 */ 601 efx->rx_dma_len = (efx->rx_prefix_size + 602 EF4_MAX_FRAME_LEN(efx->net_dev->mtu) + 603 efx->type->rx_buffer_padding); 604 rx_buf_len = (sizeof(struct ef4_rx_page_state) + 605 efx->rx_ip_align + efx->rx_dma_len); 606 if (rx_buf_len <= PAGE_SIZE) { 607 efx->rx_scatter = efx->type->always_rx_scatter; 608 efx->rx_buffer_order = 0; 609 } else if (efx->type->can_rx_scatter) { 610 BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES); 611 BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) + 612 2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE, 613 EF4_RX_BUF_ALIGNMENT) > 614 PAGE_SIZE); 615 efx->rx_scatter = true; 616 efx->rx_dma_len = EF4_RX_USR_BUF_SIZE; 617 efx->rx_buffer_order = 0; 618 } else { 619 efx->rx_scatter = false; 620 efx->rx_buffer_order = get_order(rx_buf_len); 621 } 622 623 ef4_rx_config_page_split(efx); 624 if (efx->rx_buffer_order) 625 netif_dbg(efx, drv, efx->net_dev, 626 "RX buf len=%u; page order=%u batch=%u\n", 627 efx->rx_dma_len, efx->rx_buffer_order, 628 efx->rx_pages_per_batch); 629 else 630 netif_dbg(efx, drv, efx->net_dev, 631 "RX buf len=%u step=%u bpp=%u; page batch=%u\n", 632 efx->rx_dma_len, efx->rx_page_buf_step, 633 efx->rx_bufs_per_page, efx->rx_pages_per_batch); 634 635 /* Restore previously fixed features in hw_features and remove 636 * features which are fixed now 637 */ 638 efx->net_dev->hw_features |= efx->net_dev->features; 639 efx->net_dev->hw_features &= ~efx->fixed_features; 640 efx->net_dev->features |= efx->fixed_features; 641 if (efx->net_dev->features != old_features) 642 netdev_features_change(efx->net_dev); 643 644 /* RX filters may also have scatter-enabled flags */ 645 if (efx->rx_scatter != old_rx_scatter) 646 efx->type->filter_update_rx_scatter(efx); 647 648 /* We must keep at least one descriptor in a TX ring empty. 649 * We could avoid this when the queue size does not exactly 650 * match the hardware ring size, but it's not that important. 651 * Therefore we stop the queue when one more skb might fill 652 * the ring completely. We wake it when half way back to 653 * empty. 
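 * For example (assuming a 1024-entry TX ring): the queue is stopped once
 * only about ef4_tx_max_skb_descs() descriptors remain free, i.e. at
 * 1024 - ef4_tx_max_skb_descs(efx) used entries, and is woken again at
 * half that level.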
654 */ 655 efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx); 656 efx->txq_wake_thresh = efx->txq_stop_thresh / 2; 657 658 /* Initialise the channels */ 659 ef4_for_each_channel(channel, efx) { 660 ef4_for_each_channel_tx_queue(tx_queue, channel) { 661 ef4_init_tx_queue(tx_queue); 662 atomic_inc(&efx->active_queues); 663 } 664 665 ef4_for_each_channel_rx_queue(rx_queue, channel) { 666 ef4_init_rx_queue(rx_queue); 667 atomic_inc(&efx->active_queues); 668 ef4_stop_eventq(channel); 669 ef4_fast_push_rx_descriptors(rx_queue, false); 670 ef4_start_eventq(channel); 671 } 672 673 WARN_ON(channel->rx_pkt_n_frags); 674 } 675 676 if (netif_device_present(efx->net_dev)) 677 netif_tx_wake_all_queues(efx->net_dev); 678 } 679 680 static void ef4_stop_datapath(struct ef4_nic *efx) 681 { 682 struct ef4_channel *channel; 683 struct ef4_tx_queue *tx_queue; 684 struct ef4_rx_queue *rx_queue; 685 int rc; 686 687 EF4_ASSERT_RESET_SERIALISED(efx); 688 BUG_ON(efx->port_enabled); 689 690 /* Stop RX refill */ 691 ef4_for_each_channel(channel, efx) { 692 ef4_for_each_channel_rx_queue(rx_queue, channel) 693 rx_queue->refill_enabled = false; 694 } 695 696 ef4_for_each_channel(channel, efx) { 697 /* RX packet processing is pipelined, so wait for the 698 * NAPI handler to complete. At least event queue 0 699 * might be kept active by non-data events, so don't 700 * use napi_synchronize() but actually disable NAPI 701 * temporarily. 702 */ 703 if (ef4_channel_has_rx_queue(channel)) { 704 ef4_stop_eventq(channel); 705 ef4_start_eventq(channel); 706 } 707 } 708 709 rc = efx->type->fini_dmaq(efx); 710 if (rc && EF4_WORKAROUND_7803(efx)) { 711 /* Schedule a reset to recover from the flush failure. The 712 * descriptor caches reference memory we're about to free, 713 * but falcon_reconfigure_mac_wrapper() won't reconnect 714 * the MACs because of the pending reset. 715 */ 716 netif_err(efx, drv, efx->net_dev, 717 "Resetting to recover from flush failure\n"); 718 ef4_schedule_reset(efx, RESET_TYPE_ALL); 719 } else if (rc) { 720 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); 721 } else { 722 netif_dbg(efx, drv, efx->net_dev, 723 "successfully flushed all queues\n"); 724 } 725 726 ef4_for_each_channel(channel, efx) { 727 ef4_for_each_channel_rx_queue(rx_queue, channel) 728 ef4_fini_rx_queue(rx_queue); 729 ef4_for_each_possible_channel_tx_queue(tx_queue, channel) 730 ef4_fini_tx_queue(tx_queue); 731 } 732 } 733 734 static void ef4_remove_channel(struct ef4_channel *channel) 735 { 736 struct ef4_tx_queue *tx_queue; 737 struct ef4_rx_queue *rx_queue; 738 739 netif_dbg(channel->efx, drv, channel->efx->net_dev, 740 "destroy chan %d\n", channel->channel); 741 742 ef4_for_each_channel_rx_queue(rx_queue, channel) 743 ef4_remove_rx_queue(rx_queue); 744 ef4_for_each_possible_channel_tx_queue(tx_queue, channel) 745 ef4_remove_tx_queue(tx_queue); 746 ef4_remove_eventq(channel); 747 channel->type->post_remove(channel); 748 } 749 750 static void ef4_remove_channels(struct ef4_nic *efx) 751 { 752 struct ef4_channel *channel; 753 754 ef4_for_each_channel(channel, efx) 755 ef4_remove_channel(channel); 756 } 757 758 int 759 ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries) 760 { 761 struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel; 762 u32 old_rxq_entries, old_txq_entries; 763 unsigned i, next_buffer_table = 0; 764 int rc, rc2; 765 766 rc = ef4_check_disabled(efx); 767 if (rc) 768 return rc; 769 770 /* Not all channels should be reallocated. 
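 * (Channels whose type has no ->copy operation are kept as-is; the loop
 * below reserves their existing buffer table ranges via next_buffer_table.)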
We must avoid 771 * reallocating their buffer table entries. 772 */ 773 ef4_for_each_channel(channel, efx) { 774 struct ef4_rx_queue *rx_queue; 775 struct ef4_tx_queue *tx_queue; 776 777 if (channel->type->copy) 778 continue; 779 next_buffer_table = max(next_buffer_table, 780 channel->eventq.index + 781 channel->eventq.entries); 782 ef4_for_each_channel_rx_queue(rx_queue, channel) 783 next_buffer_table = max(next_buffer_table, 784 rx_queue->rxd.index + 785 rx_queue->rxd.entries); 786 ef4_for_each_channel_tx_queue(tx_queue, channel) 787 next_buffer_table = max(next_buffer_table, 788 tx_queue->txd.index + 789 tx_queue->txd.entries); 790 } 791 792 ef4_device_detach_sync(efx); 793 ef4_stop_all(efx); 794 ef4_soft_disable_interrupts(efx); 795 796 /* Clone channels (where possible) */ 797 memset(other_channel, 0, sizeof(other_channel)); 798 for (i = 0; i < efx->n_channels; i++) { 799 channel = efx->channel[i]; 800 if (channel->type->copy) 801 channel = channel->type->copy(channel); 802 if (!channel) { 803 rc = -ENOMEM; 804 goto out; 805 } 806 other_channel[i] = channel; 807 } 808 809 /* Swap entry counts and channel pointers */ 810 old_rxq_entries = efx->rxq_entries; 811 old_txq_entries = efx->txq_entries; 812 efx->rxq_entries = rxq_entries; 813 efx->txq_entries = txq_entries; 814 for (i = 0; i < efx->n_channels; i++) { 815 swap(efx->channel[i], other_channel[i]); 816 } 817 818 /* Restart buffer table allocation */ 819 efx->next_buffer_table = next_buffer_table; 820 821 for (i = 0; i < efx->n_channels; i++) { 822 channel = efx->channel[i]; 823 if (!channel->type->copy) 824 continue; 825 rc = ef4_probe_channel(channel); 826 if (rc) 827 goto rollback; 828 ef4_init_napi_channel(efx->channel[i]); 829 } 830 831 out: 832 /* Destroy unused channel structures */ 833 for (i = 0; i < efx->n_channels; i++) { 834 channel = other_channel[i]; 835 if (channel && channel->type->copy) { 836 ef4_fini_napi_channel(channel); 837 ef4_remove_channel(channel); 838 kfree(channel); 839 } 840 } 841 842 rc2 = ef4_soft_enable_interrupts(efx); 843 if (rc2) { 844 rc = rc ? rc : rc2; 845 netif_err(efx, drv, efx->net_dev, 846 "unable to restart interrupts on channel reallocation\n"); 847 ef4_schedule_reset(efx, RESET_TYPE_DISABLE); 848 } else { 849 ef4_start_all(efx); 850 netif_device_attach(efx->net_dev); 851 } 852 return rc; 853 854 rollback: 855 /* Swap back */ 856 efx->rxq_entries = old_rxq_entries; 857 efx->txq_entries = old_txq_entries; 858 for (i = 0; i < efx->n_channels; i++) { 859 swap(efx->channel[i], other_channel[i]); 860 } 861 goto out; 862 } 863 864 void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue) 865 { 866 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); 867 } 868 869 static const struct ef4_channel_type ef4_default_channel_type = { 870 .pre_probe = ef4_channel_dummy_op_int, 871 .post_remove = ef4_channel_dummy_op_void, 872 .get_name = ef4_get_channel_name, 873 .copy = ef4_copy_channel, 874 .keep_eventq = false, 875 }; 876 877 int ef4_channel_dummy_op_int(struct ef4_channel *channel) 878 { 879 return 0; 880 } 881 882 void ef4_channel_dummy_op_void(struct ef4_channel *channel) 883 { 884 } 885 886 /************************************************************************** 887 * 888 * Port handling 889 * 890 **************************************************************************/ 891 892 /* This ensures that the kernel is kept informed (via 893 * netif_carrier_on/off) of the link status, and also maintains the 894 * link status's stop on the port's TX queue. 
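 * (Also called directly from ef4_net_open() and ef4_fini_port() in this
 * file to push the initial and final link state to the stack.)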
895 */ 896 void ef4_link_status_changed(struct ef4_nic *efx) 897 { 898 struct ef4_link_state *link_state = &efx->link_state; 899 900 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 901 * that no events are triggered between unregister_netdev() and the 902 * driver unloading. A more general condition is that NETDEV_CHANGE 903 * can only be generated between NETDEV_UP and NETDEV_DOWN */ 904 if (!netif_running(efx->net_dev)) 905 return; 906 907 if (link_state->up != netif_carrier_ok(efx->net_dev)) { 908 efx->n_link_state_changes++; 909 910 if (link_state->up) 911 netif_carrier_on(efx->net_dev); 912 else 913 netif_carrier_off(efx->net_dev); 914 } 915 916 /* Status message for kernel log */ 917 if (link_state->up) 918 netif_info(efx, link, efx->net_dev, 919 "link up at %uMbps %s-duplex (MTU %d)\n", 920 link_state->speed, link_state->fd ? "full" : "half", 921 efx->net_dev->mtu); 922 else 923 netif_info(efx, link, efx->net_dev, "link down\n"); 924 } 925 926 void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising) 927 { 928 efx->link_advertising = advertising; 929 if (advertising) { 930 if (advertising & ADVERTISED_Pause) 931 efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX); 932 else 933 efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX); 934 if (advertising & ADVERTISED_Asym_Pause) 935 efx->wanted_fc ^= EF4_FC_TX; 936 } 937 } 938 939 void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc) 940 { 941 efx->wanted_fc = wanted_fc; 942 if (efx->link_advertising) { 943 if (wanted_fc & EF4_FC_RX) 944 efx->link_advertising |= (ADVERTISED_Pause | 945 ADVERTISED_Asym_Pause); 946 else 947 efx->link_advertising &= ~(ADVERTISED_Pause | 948 ADVERTISED_Asym_Pause); 949 if (wanted_fc & EF4_FC_TX) 950 efx->link_advertising ^= ADVERTISED_Asym_Pause; 951 } 952 } 953 954 static void ef4_fini_port(struct ef4_nic *efx); 955 956 /* We assume that efx->type->reconfigure_mac will always try to sync RX 957 * filters and therefore needs to read-lock the filter table against freeing 958 */ 959 void ef4_mac_reconfigure(struct ef4_nic *efx) 960 { 961 down_read(&efx->filter_sem); 962 efx->type->reconfigure_mac(efx); 963 up_read(&efx->filter_sem); 964 } 965 966 /* Push loopback/power/transmit disable settings to the PHY, and reconfigure 967 * the MAC appropriately. All other PHY configuration changes are pushed 968 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC 969 * through ef4_monitor(). 970 * 971 * Callers must hold the mac_lock 972 */ 973 int __ef4_reconfigure_port(struct ef4_nic *efx) 974 { 975 enum ef4_phy_mode phy_mode; 976 int rc; 977 978 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 979 980 /* Disable PHY transmit in mac level loopbacks */ 981 phy_mode = efx->phy_mode; 982 if (LOOPBACK_INTERNAL(efx)) 983 efx->phy_mode |= PHY_MODE_TX_DISABLED; 984 else 985 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; 986 987 rc = efx->type->reconfigure_port(efx); 988 989 if (rc) 990 efx->phy_mode = phy_mode; 991 992 return rc; 993 } 994 995 /* Reinitialise the MAC to pick up new PHY settings, even if the port is 996 * disabled. */ 997 int ef4_reconfigure_port(struct ef4_nic *efx) 998 { 999 int rc; 1000 1001 EF4_ASSERT_RESET_SERIALISED(efx); 1002 1003 mutex_lock(&efx->mac_lock); 1004 rc = __ef4_reconfigure_port(efx); 1005 mutex_unlock(&efx->mac_lock); 1006 1007 return rc; 1008 } 1009 1010 /* Asynchronous work item for changing MAC promiscuity and multicast 1011 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current 1012 * MAC directly. 
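 * (The work handler below takes mac_lock and only calls
 * ef4_mac_reconfigure() while efx->port_enabled is set.)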
*/ 1013 static void ef4_mac_work(struct work_struct *data) 1014 { 1015 struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work); 1016 1017 mutex_lock(&efx->mac_lock); 1018 if (efx->port_enabled) 1019 ef4_mac_reconfigure(efx); 1020 mutex_unlock(&efx->mac_lock); 1021 } 1022 1023 static int ef4_probe_port(struct ef4_nic *efx) 1024 { 1025 int rc; 1026 1027 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 1028 1029 if (phy_flash_cfg) 1030 efx->phy_mode = PHY_MODE_SPECIAL; 1031 1032 /* Connect up MAC/PHY operations table */ 1033 rc = efx->type->probe_port(efx); 1034 if (rc) 1035 return rc; 1036 1037 /* Initialise MAC address to permanent address */ 1038 eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); 1039 1040 return 0; 1041 } 1042 1043 static int ef4_init_port(struct ef4_nic *efx) 1044 { 1045 int rc; 1046 1047 netif_dbg(efx, drv, efx->net_dev, "init port\n"); 1048 1049 mutex_lock(&efx->mac_lock); 1050 1051 rc = efx->phy_op->init(efx); 1052 if (rc) 1053 goto fail1; 1054 1055 efx->port_initialized = true; 1056 1057 /* Reconfigure the MAC before creating dma queues (required for 1058 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ 1059 ef4_mac_reconfigure(efx); 1060 1061 /* Ensure the PHY advertises the correct flow control settings */ 1062 rc = efx->phy_op->reconfigure(efx); 1063 if (rc && rc != -EPERM) 1064 goto fail2; 1065 1066 mutex_unlock(&efx->mac_lock); 1067 return 0; 1068 1069 fail2: 1070 efx->phy_op->fini(efx); 1071 fail1: 1072 mutex_unlock(&efx->mac_lock); 1073 return rc; 1074 } 1075 1076 static void ef4_start_port(struct ef4_nic *efx) 1077 { 1078 netif_dbg(efx, ifup, efx->net_dev, "start port\n"); 1079 BUG_ON(efx->port_enabled); 1080 1081 mutex_lock(&efx->mac_lock); 1082 efx->port_enabled = true; 1083 1084 /* Ensure MAC ingress/egress is enabled */ 1085 ef4_mac_reconfigure(efx); 1086 1087 mutex_unlock(&efx->mac_lock); 1088 } 1089 1090 /* Cancel work for MAC reconfiguration, periodic hardware monitoring 1091 * and the async self-test, wait for them to finish and prevent them 1092 * being scheduled again. This doesn't cover online resets, which 1093 * should only be cancelled when removing the device. 
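 * (port_enabled is cleared under mac_lock first, so a monitor or MAC work
 * item that is already running sees the flag and does nothing further.)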
1094 */ 1095 static void ef4_stop_port(struct ef4_nic *efx) 1096 { 1097 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); 1098 1099 EF4_ASSERT_RESET_SERIALISED(efx); 1100 1101 mutex_lock(&efx->mac_lock); 1102 efx->port_enabled = false; 1103 mutex_unlock(&efx->mac_lock); 1104 1105 /* Serialise against ef4_set_multicast_list() */ 1106 netif_addr_lock_bh(efx->net_dev); 1107 netif_addr_unlock_bh(efx->net_dev); 1108 1109 cancel_delayed_work_sync(&efx->monitor_work); 1110 ef4_selftest_async_cancel(efx); 1111 cancel_work_sync(&efx->mac_work); 1112 } 1113 1114 static void ef4_fini_port(struct ef4_nic *efx) 1115 { 1116 netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); 1117 1118 if (!efx->port_initialized) 1119 return; 1120 1121 efx->phy_op->fini(efx); 1122 efx->port_initialized = false; 1123 1124 efx->link_state.up = false; 1125 ef4_link_status_changed(efx); 1126 } 1127 1128 static void ef4_remove_port(struct ef4_nic *efx) 1129 { 1130 netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); 1131 1132 efx->type->remove_port(efx); 1133 } 1134 1135 /************************************************************************** 1136 * 1137 * NIC handling 1138 * 1139 **************************************************************************/ 1140 1141 static LIST_HEAD(ef4_primary_list); 1142 static LIST_HEAD(ef4_unassociated_list); 1143 1144 static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right) 1145 { 1146 return left->type == right->type && 1147 left->vpd_sn && right->vpd_sn && 1148 !strcmp(left->vpd_sn, right->vpd_sn); 1149 } 1150 1151 static void ef4_associate(struct ef4_nic *efx) 1152 { 1153 struct ef4_nic *other, *next; 1154 1155 if (efx->primary == efx) { 1156 /* Adding primary function; look for secondaries */ 1157 1158 netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); 1159 list_add_tail(&efx->node, &ef4_primary_list); 1160 1161 list_for_each_entry_safe(other, next, &ef4_unassociated_list, 1162 node) { 1163 if (ef4_same_controller(efx, other)) { 1164 list_del(&other->node); 1165 netif_dbg(other, probe, other->net_dev, 1166 "moving to secondary list of %s %s\n", 1167 pci_name(efx->pci_dev), 1168 efx->net_dev->name); 1169 list_add_tail(&other->node, 1170 &efx->secondary_list); 1171 other->primary = efx; 1172 } 1173 } 1174 } else { 1175 /* Adding secondary function; look for primary */ 1176 1177 list_for_each_entry(other, &ef4_primary_list, node) { 1178 if (ef4_same_controller(efx, other)) { 1179 netif_dbg(efx, probe, efx->net_dev, 1180 "adding to secondary list of %s %s\n", 1181 pci_name(other->pci_dev), 1182 other->net_dev->name); 1183 list_add_tail(&efx->node, 1184 &other->secondary_list); 1185 efx->primary = other; 1186 return; 1187 } 1188 } 1189 1190 netif_dbg(efx, probe, efx->net_dev, 1191 "adding to unassociated list\n"); 1192 list_add_tail(&efx->node, &ef4_unassociated_list); 1193 } 1194 } 1195 1196 static void ef4_dissociate(struct ef4_nic *efx) 1197 { 1198 struct ef4_nic *other, *next; 1199 1200 list_del(&efx->node); 1201 efx->primary = NULL; 1202 1203 list_for_each_entry_safe(other, next, &efx->secondary_list, node) { 1204 list_del(&other->node); 1205 netif_dbg(other, probe, other->net_dev, 1206 "moving to unassociated list\n"); 1207 list_add_tail(&other->node, &ef4_unassociated_list); 1208 other->primary = NULL; 1209 } 1210 } 1211 1212 /* This configures the PCI device to enable I/O and DMA. 
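 * It enables the device and bus mastering, negotiates a DMA mask by walking
 * down from efx->type->max_dma_mask, reserves the memory BAR and ioremap()s
 * it into efx->membase.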
 */
static int ef4_init_io(struct ef4_nic *efx)
{
        struct pci_dev *pci_dev = efx->pci_dev;
        dma_addr_t dma_mask = efx->type->max_dma_mask;
        unsigned int mem_map_size = efx->type->mem_map_size(efx);
        int rc, bar;

        netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

        bar = efx->type->mem_bar;

        rc = pci_enable_device(pci_dev);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to enable PCI device\n");
                goto fail1;
        }

        pci_set_master(pci_dev);

        /* Set the PCI DMA mask. Try all possibilities from our genuine mask
         * down to 32 bits, because some architectures will allow 40 bit
         * masks even though they reject 46 bit masks.
         */
        while (dma_mask > 0x7fffffffUL) {
                rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
                if (rc == 0)
                        break;
                dma_mask >>= 1;
        }
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "could not find a suitable DMA mask\n");
                goto fail2;
        }
        netif_dbg(efx, probe, efx->net_dev,
                  "using DMA mask %llx\n", (unsigned long long) dma_mask);

        efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
        rc = pci_request_region(pci_dev, bar, "sfc");
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "request for memory BAR failed\n");
                rc = -EIO;
                goto fail3;
        }
        efx->membase = ioremap(efx->membase_phys, mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
                          (unsigned long long)efx->membase_phys, mem_map_size);
                rc = -ENOMEM;
                goto fail4;
        }
        netif_dbg(efx, probe, efx->net_dev,
                  "memory BAR at %llx+%x (virtual %p)\n",
                  (unsigned long long)efx->membase_phys, mem_map_size,
                  efx->membase);

        return 0;

fail4:
        pci_release_region(efx->pci_dev, bar);
fail3:
        efx->membase_phys = 0;
fail2:
        pci_disable_device(efx->pci_dev);
fail1:
        return rc;
}

static void ef4_fini_io(struct ef4_nic *efx)
{
        int bar;

        netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

        if (efx->membase) {
                iounmap(efx->membase);
                efx->membase = NULL;
        }

        if (efx->membase_phys) {
                bar = efx->type->mem_bar;
                pci_release_region(efx->pci_dev, bar);
                efx->membase_phys = 0;
        }

        /* Don't disable bus-mastering if VFs are assigned */
        if (!pci_vfs_assigned(efx->pci_dev))
                pci_disable_device(efx->pci_dev);
}

void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
                efx->rx_indir_table[i] =
                        ethtool_rxfh_indir_default(i, efx->rss_spread);
}

static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
{
        cpumask_var_t thread_mask;
        unsigned int count;
        int cpu;

        if (rss_cpus) {
                count = rss_cpus;
        } else {
                if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
                        netif_warn(efx, probe, efx->net_dev,
                                   "RSS disabled due to allocation failure\n");
                        return 1;
                }

                count = 0;
                for_each_online_cpu(cpu) {
                        if (!cpumask_test_cpu(cpu, thread_mask)) {
                                ++count;
                                cpumask_or(thread_mask, thread_mask,
                                           topology_sibling_cpumask(cpu));
                        }
                }

                free_cpumask_var(thread_mask);
        }

        if (count > EF4_MAX_RX_QUEUES) {
                netif_cond_dbg(efx, probe,
efx->net_dev, !rss_cpus, warn, 1344 "Reducing number of rx queues from %u to %u.\n", 1345 count, EF4_MAX_RX_QUEUES); 1346 count = EF4_MAX_RX_QUEUES; 1347 } 1348 1349 return count; 1350 } 1351 1352 /* Probe the number and type of interrupts we are able to obtain, and 1353 * the resulting numbers of channels and RX queues. 1354 */ 1355 static int ef4_probe_interrupts(struct ef4_nic *efx) 1356 { 1357 unsigned int extra_channels = 0; 1358 unsigned int i, j; 1359 int rc; 1360 1361 for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) 1362 if (efx->extra_channel_type[i]) 1363 ++extra_channels; 1364 1365 if (efx->interrupt_mode == EF4_INT_MODE_MSIX) { 1366 struct msix_entry xentries[EF4_MAX_CHANNELS]; 1367 unsigned int n_channels; 1368 1369 n_channels = ef4_wanted_parallelism(efx); 1370 if (ef4_separate_tx_channels) 1371 n_channels *= 2; 1372 n_channels += extra_channels; 1373 n_channels = min(n_channels, efx->max_channels); 1374 1375 for (i = 0; i < n_channels; i++) 1376 xentries[i].entry = i; 1377 rc = pci_enable_msix_range(efx->pci_dev, 1378 xentries, 1, n_channels); 1379 if (rc < 0) { 1380 /* Fall back to single channel MSI */ 1381 efx->interrupt_mode = EF4_INT_MODE_MSI; 1382 netif_err(efx, drv, efx->net_dev, 1383 "could not enable MSI-X\n"); 1384 } else if (rc < n_channels) { 1385 netif_err(efx, drv, efx->net_dev, 1386 "WARNING: Insufficient MSI-X vectors" 1387 " available (%d < %u).\n", rc, n_channels); 1388 netif_err(efx, drv, efx->net_dev, 1389 "WARNING: Performance may be reduced.\n"); 1390 n_channels = rc; 1391 } 1392 1393 if (rc > 0) { 1394 efx->n_channels = n_channels; 1395 if (n_channels > extra_channels) 1396 n_channels -= extra_channels; 1397 if (ef4_separate_tx_channels) { 1398 efx->n_tx_channels = min(max(n_channels / 2, 1399 1U), 1400 efx->max_tx_channels); 1401 efx->n_rx_channels = max(n_channels - 1402 efx->n_tx_channels, 1403 1U); 1404 } else { 1405 efx->n_tx_channels = min(n_channels, 1406 efx->max_tx_channels); 1407 efx->n_rx_channels = n_channels; 1408 } 1409 for (i = 0; i < efx->n_channels; i++) 1410 ef4_get_channel(efx, i)->irq = 1411 xentries[i].vector; 1412 } 1413 } 1414 1415 /* Try single interrupt MSI */ 1416 if (efx->interrupt_mode == EF4_INT_MODE_MSI) { 1417 efx->n_channels = 1; 1418 efx->n_rx_channels = 1; 1419 efx->n_tx_channels = 1; 1420 rc = pci_enable_msi(efx->pci_dev); 1421 if (rc == 0) { 1422 ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq; 1423 } else { 1424 netif_err(efx, drv, efx->net_dev, 1425 "could not enable MSI\n"); 1426 efx->interrupt_mode = EF4_INT_MODE_LEGACY; 1427 } 1428 } 1429 1430 /* Assume legacy interrupts */ 1431 if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) { 1432 efx->n_channels = 1 + (ef4_separate_tx_channels ? 
1 : 0); 1433 efx->n_rx_channels = 1; 1434 efx->n_tx_channels = 1; 1435 efx->legacy_irq = efx->pci_dev->irq; 1436 } 1437 1438 /* Assign extra channels if possible */ 1439 j = efx->n_channels; 1440 for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) { 1441 if (!efx->extra_channel_type[i]) 1442 continue; 1443 if (efx->interrupt_mode != EF4_INT_MODE_MSIX || 1444 efx->n_channels <= extra_channels) { 1445 efx->extra_channel_type[i]->handle_no_channel(efx); 1446 } else { 1447 --j; 1448 ef4_get_channel(efx, j)->type = 1449 efx->extra_channel_type[i]; 1450 } 1451 } 1452 1453 efx->rss_spread = efx->n_rx_channels; 1454 1455 return 0; 1456 } 1457 1458 static int ef4_soft_enable_interrupts(struct ef4_nic *efx) 1459 { 1460 struct ef4_channel *channel, *end_channel; 1461 int rc; 1462 1463 BUG_ON(efx->state == STATE_DISABLED); 1464 1465 efx->irq_soft_enabled = true; 1466 smp_wmb(); 1467 1468 ef4_for_each_channel(channel, efx) { 1469 if (!channel->type->keep_eventq) { 1470 rc = ef4_init_eventq(channel); 1471 if (rc) 1472 goto fail; 1473 } 1474 ef4_start_eventq(channel); 1475 } 1476 1477 return 0; 1478 fail: 1479 end_channel = channel; 1480 ef4_for_each_channel(channel, efx) { 1481 if (channel == end_channel) 1482 break; 1483 ef4_stop_eventq(channel); 1484 if (!channel->type->keep_eventq) 1485 ef4_fini_eventq(channel); 1486 } 1487 1488 return rc; 1489 } 1490 1491 static void ef4_soft_disable_interrupts(struct ef4_nic *efx) 1492 { 1493 struct ef4_channel *channel; 1494 1495 if (efx->state == STATE_DISABLED) 1496 return; 1497 1498 efx->irq_soft_enabled = false; 1499 smp_wmb(); 1500 1501 if (efx->legacy_irq) 1502 synchronize_irq(efx->legacy_irq); 1503 1504 ef4_for_each_channel(channel, efx) { 1505 if (channel->irq) 1506 synchronize_irq(channel->irq); 1507 1508 ef4_stop_eventq(channel); 1509 if (!channel->type->keep_eventq) 1510 ef4_fini_eventq(channel); 1511 } 1512 } 1513 1514 static int ef4_enable_interrupts(struct ef4_nic *efx) 1515 { 1516 struct ef4_channel *channel, *end_channel; 1517 int rc; 1518 1519 BUG_ON(efx->state == STATE_DISABLED); 1520 1521 if (efx->eeh_disabled_legacy_irq) { 1522 enable_irq(efx->legacy_irq); 1523 efx->eeh_disabled_legacy_irq = false; 1524 } 1525 1526 efx->type->irq_enable_master(efx); 1527 1528 ef4_for_each_channel(channel, efx) { 1529 if (channel->type->keep_eventq) { 1530 rc = ef4_init_eventq(channel); 1531 if (rc) 1532 goto fail; 1533 } 1534 } 1535 1536 rc = ef4_soft_enable_interrupts(efx); 1537 if (rc) 1538 goto fail; 1539 1540 return 0; 1541 1542 fail: 1543 end_channel = channel; 1544 ef4_for_each_channel(channel, efx) { 1545 if (channel == end_channel) 1546 break; 1547 if (channel->type->keep_eventq) 1548 ef4_fini_eventq(channel); 1549 } 1550 1551 efx->type->irq_disable_non_ev(efx); 1552 1553 return rc; 1554 } 1555 1556 static void ef4_disable_interrupts(struct ef4_nic *efx) 1557 { 1558 struct ef4_channel *channel; 1559 1560 ef4_soft_disable_interrupts(efx); 1561 1562 ef4_for_each_channel(channel, efx) { 1563 if (channel->type->keep_eventq) 1564 ef4_fini_eventq(channel); 1565 } 1566 1567 efx->type->irq_disable_non_ev(efx); 1568 } 1569 1570 static void ef4_remove_interrupts(struct ef4_nic *efx) 1571 { 1572 struct ef4_channel *channel; 1573 1574 /* Remove MSI/MSI-X interrupts */ 1575 ef4_for_each_channel(channel, efx) 1576 channel->irq = 0; 1577 pci_disable_msi(efx->pci_dev); 1578 pci_disable_msix(efx->pci_dev); 1579 1580 /* Remove legacy interrupt */ 1581 efx->legacy_irq = 0; 1582 } 1583 1584 static void ef4_set_channels(struct ef4_nic *efx) 1585 { 1586 struct ef4_channel *channel; 
1587 struct ef4_tx_queue *tx_queue; 1588 1589 efx->tx_channel_offset = 1590 ef4_separate_tx_channels ? 1591 efx->n_channels - efx->n_tx_channels : 0; 1592 1593 /* We need to mark which channels really have RX and TX 1594 * queues, and adjust the TX queue numbers if we have separate 1595 * RX-only and TX-only channels. 1596 */ 1597 ef4_for_each_channel(channel, efx) { 1598 if (channel->channel < efx->n_rx_channels) 1599 channel->rx_queue.core_index = channel->channel; 1600 else 1601 channel->rx_queue.core_index = -1; 1602 1603 ef4_for_each_channel_tx_queue(tx_queue, channel) 1604 tx_queue->queue -= (efx->tx_channel_offset * 1605 EF4_TXQ_TYPES); 1606 } 1607 } 1608 1609 static int ef4_probe_nic(struct ef4_nic *efx) 1610 { 1611 int rc; 1612 1613 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); 1614 1615 /* Carry out hardware-type specific initialisation */ 1616 rc = efx->type->probe(efx); 1617 if (rc) 1618 return rc; 1619 1620 do { 1621 if (!efx->max_channels || !efx->max_tx_channels) { 1622 netif_err(efx, drv, efx->net_dev, 1623 "Insufficient resources to allocate" 1624 " any channels\n"); 1625 rc = -ENOSPC; 1626 goto fail1; 1627 } 1628 1629 /* Determine the number of channels and queues by trying 1630 * to hook in MSI-X interrupts. 1631 */ 1632 rc = ef4_probe_interrupts(efx); 1633 if (rc) 1634 goto fail1; 1635 1636 ef4_set_channels(efx); 1637 1638 /* dimension_resources can fail with EAGAIN */ 1639 rc = efx->type->dimension_resources(efx); 1640 if (rc != 0 && rc != -EAGAIN) 1641 goto fail2; 1642 1643 if (rc == -EAGAIN) 1644 /* try again with new max_channels */ 1645 ef4_remove_interrupts(efx); 1646 1647 } while (rc == -EAGAIN); 1648 1649 if (efx->n_channels > 1) 1650 netdev_rss_key_fill(&efx->rx_hash_key, 1651 sizeof(efx->rx_hash_key)); 1652 ef4_set_default_rx_indir_table(efx); 1653 1654 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); 1655 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); 1656 1657 /* Initialise the interrupt moderation settings */ 1658 efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); 1659 ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, 1660 true); 1661 1662 return 0; 1663 1664 fail2: 1665 ef4_remove_interrupts(efx); 1666 fail1: 1667 efx->type->remove(efx); 1668 return rc; 1669 } 1670 1671 static void ef4_remove_nic(struct ef4_nic *efx) 1672 { 1673 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); 1674 1675 ef4_remove_interrupts(efx); 1676 efx->type->remove(efx); 1677 } 1678 1679 static int ef4_probe_filters(struct ef4_nic *efx) 1680 { 1681 int rc; 1682 1683 spin_lock_init(&efx->filter_lock); 1684 init_rwsem(&efx->filter_sem); 1685 mutex_lock(&efx->mac_lock); 1686 down_write(&efx->filter_sem); 1687 rc = efx->type->filter_table_probe(efx); 1688 if (rc) 1689 goto out_unlock; 1690 1691 #ifdef CONFIG_RFS_ACCEL 1692 if (efx->type->offload_features & NETIF_F_NTUPLE) { 1693 struct ef4_channel *channel; 1694 int i, success = 1; 1695 1696 ef4_for_each_channel(channel, efx) { 1697 channel->rps_flow_id = 1698 kcalloc(efx->type->max_rx_ip_filters, 1699 sizeof(*channel->rps_flow_id), 1700 GFP_KERNEL); 1701 if (!channel->rps_flow_id) 1702 success = 0; 1703 else 1704 for (i = 0; 1705 i < efx->type->max_rx_ip_filters; 1706 ++i) 1707 channel->rps_flow_id[i] = 1708 RPS_FLOW_ID_INVALID; 1709 } 1710 1711 if (!success) { 1712 ef4_for_each_channel(channel, efx) 1713 kfree(channel->rps_flow_id); 1714 efx->type->filter_table_remove(efx); 1715 rc = -ENOMEM; 1716 goto out_unlock; 1717 } 1718 1719 efx->rps_expire_index = 
efx->rps_expire_channel = 0; 1720 } 1721 #endif 1722 out_unlock: 1723 up_write(&efx->filter_sem); 1724 mutex_unlock(&efx->mac_lock); 1725 return rc; 1726 } 1727 1728 static void ef4_remove_filters(struct ef4_nic *efx) 1729 { 1730 #ifdef CONFIG_RFS_ACCEL 1731 struct ef4_channel *channel; 1732 1733 ef4_for_each_channel(channel, efx) 1734 kfree(channel->rps_flow_id); 1735 #endif 1736 down_write(&efx->filter_sem); 1737 efx->type->filter_table_remove(efx); 1738 up_write(&efx->filter_sem); 1739 } 1740 1741 static void ef4_restore_filters(struct ef4_nic *efx) 1742 { 1743 down_read(&efx->filter_sem); 1744 efx->type->filter_table_restore(efx); 1745 up_read(&efx->filter_sem); 1746 } 1747 1748 /************************************************************************** 1749 * 1750 * NIC startup/shutdown 1751 * 1752 *************************************************************************/ 1753 1754 static int ef4_probe_all(struct ef4_nic *efx) 1755 { 1756 int rc; 1757 1758 rc = ef4_probe_nic(efx); 1759 if (rc) { 1760 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); 1761 goto fail1; 1762 } 1763 1764 rc = ef4_probe_port(efx); 1765 if (rc) { 1766 netif_err(efx, probe, efx->net_dev, "failed to create port\n"); 1767 goto fail2; 1768 } 1769 1770 BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT); 1771 if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) { 1772 rc = -EINVAL; 1773 goto fail3; 1774 } 1775 efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE; 1776 1777 rc = ef4_probe_filters(efx); 1778 if (rc) { 1779 netif_err(efx, probe, efx->net_dev, 1780 "failed to create filter tables\n"); 1781 goto fail4; 1782 } 1783 1784 rc = ef4_probe_channels(efx); 1785 if (rc) 1786 goto fail5; 1787 1788 return 0; 1789 1790 fail5: 1791 ef4_remove_filters(efx); 1792 fail4: 1793 fail3: 1794 ef4_remove_port(efx); 1795 fail2: 1796 ef4_remove_nic(efx); 1797 fail1: 1798 return rc; 1799 } 1800 1801 /* If the interface is supposed to be running but is not, start 1802 * the hardware and software data path, regular activity for the port 1803 * (MAC statistics, link polling, etc.) and schedule the port to be 1804 * reconfigured. Interrupts must already be enabled. This function 1805 * is safe to call multiple times, so long as the NIC is not disabled. 1806 * Requires the RTNL lock. 1807 */ 1808 static void ef4_start_all(struct ef4_nic *efx) 1809 { 1810 EF4_ASSERT_RESET_SERIALISED(efx); 1811 BUG_ON(efx->state == STATE_DISABLED); 1812 1813 /* Check that it is appropriate to restart the interface. All 1814 * of these flags are safe to read under just the rtnl lock */ 1815 if (efx->port_enabled || !netif_running(efx->net_dev) || 1816 efx->reset_pending) 1817 return; 1818 1819 ef4_start_port(efx); 1820 ef4_start_datapath(efx); 1821 1822 /* Start the hardware monitor if there is one */ 1823 if (efx->type->monitor != NULL) 1824 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1825 ef4_monitor_interval); 1826 1827 efx->type->start_stats(efx); 1828 efx->type->pull_stats(efx); 1829 spin_lock_bh(&efx->stats_lock); 1830 efx->type->update_stats(efx, NULL, NULL); 1831 spin_unlock_bh(&efx->stats_lock); 1832 } 1833 1834 /* Quiesce the hardware and software data path, and regular activity 1835 * for the port without bringing the link down. Safe to call multiple 1836 * times with the NIC in almost any state, but interrupts should be 1837 * enabled. Requires the RTNL lock. 
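 * (Calling this again while already stopped is a no-op: ef4_stop_port()
 * cleared port_enabled on the first call, and that flag is checked on
 * entry.)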
1838 */ 1839 static void ef4_stop_all(struct ef4_nic *efx) 1840 { 1841 EF4_ASSERT_RESET_SERIALISED(efx); 1842 1843 /* port_enabled can be read safely under the rtnl lock */ 1844 if (!efx->port_enabled) 1845 return; 1846 1847 /* update stats before we go down so we can accurately count 1848 * rx_nodesc_drops 1849 */ 1850 efx->type->pull_stats(efx); 1851 spin_lock_bh(&efx->stats_lock); 1852 efx->type->update_stats(efx, NULL, NULL); 1853 spin_unlock_bh(&efx->stats_lock); 1854 efx->type->stop_stats(efx); 1855 ef4_stop_port(efx); 1856 1857 /* Stop the kernel transmit interface. This is only valid if 1858 * the device is stopped or detached; otherwise the watchdog 1859 * may fire immediately. 1860 */ 1861 WARN_ON(netif_running(efx->net_dev) && 1862 netif_device_present(efx->net_dev)); 1863 netif_tx_disable(efx->net_dev); 1864 1865 ef4_stop_datapath(efx); 1866 } 1867 1868 static void ef4_remove_all(struct ef4_nic *efx) 1869 { 1870 ef4_remove_channels(efx); 1871 ef4_remove_filters(efx); 1872 ef4_remove_port(efx); 1873 ef4_remove_nic(efx); 1874 } 1875 1876 /************************************************************************** 1877 * 1878 * Interrupt moderation 1879 * 1880 **************************************************************************/ 1881 unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs) 1882 { 1883 if (usecs == 0) 1884 return 0; 1885 if (usecs * 1000 < efx->timer_quantum_ns) 1886 return 1; /* never round down to 0 */ 1887 return usecs * 1000 / efx->timer_quantum_ns; 1888 } 1889 1890 unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks) 1891 { 1892 /* We must round up when converting ticks to microseconds 1893 * because we round down when converting the other way. 1894 */ 1895 return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); 1896 } 1897 1898 /* Set interrupt moderation parameters */ 1899 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs, 1900 unsigned int rx_usecs, bool rx_adaptive, 1901 bool rx_may_override_tx) 1902 { 1903 struct ef4_channel *channel; 1904 unsigned int timer_max_us; 1905 1906 EF4_ASSERT_RESET_SERIALISED(efx); 1907 1908 timer_max_us = efx->timer_max_ns / 1000; 1909 1910 if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) 1911 return -EINVAL; 1912 1913 if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && 1914 !rx_may_override_tx) { 1915 netif_err(efx, drv, efx->net_dev, "Channels are shared. " 1916 "RX and TX IRQ moderation must be equal\n"); 1917 return -EINVAL; 1918 } 1919 1920 efx->irq_rx_adaptive = rx_adaptive; 1921 efx->irq_rx_moderation_us = rx_usecs; 1922 ef4_for_each_channel(channel, efx) { 1923 if (ef4_channel_has_rx_queue(channel)) 1924 channel->irq_moderation_us = rx_usecs; 1925 else if (ef4_channel_has_tx_queues(channel)) 1926 channel->irq_moderation_us = tx_usecs; 1927 } 1928 1929 return 0; 1930 } 1931 1932 void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs, 1933 unsigned int *rx_usecs, bool *rx_adaptive) 1934 { 1935 *rx_adaptive = efx->irq_rx_adaptive; 1936 *rx_usecs = efx->irq_rx_moderation_us; 1937 1938 /* If channels are shared between RX and TX, so is IRQ 1939 * moderation. Otherwise, IRQ moderation is the same for all 1940 * TX channels and is not adaptive. 
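 * (tx_channel_offset == 0 indicates shared channels; otherwise the setting
 * of the first TX-only channel is reported for all TX channels.)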
1941 */ 1942 if (efx->tx_channel_offset == 0) { 1943 *tx_usecs = *rx_usecs; 1944 } else { 1945 struct ef4_channel *tx_channel; 1946 1947 tx_channel = efx->channel[efx->tx_channel_offset]; 1948 *tx_usecs = tx_channel->irq_moderation_us; 1949 } 1950 } 1951 1952 /************************************************************************** 1953 * 1954 * Hardware monitor 1955 * 1956 **************************************************************************/ 1957 1958 /* Run periodically off the general workqueue */ 1959 static void ef4_monitor(struct work_struct *data) 1960 { 1961 struct ef4_nic *efx = container_of(data, struct ef4_nic, 1962 monitor_work.work); 1963 1964 netif_vdbg(efx, timer, efx->net_dev, 1965 "hardware monitor executing on CPU %d\n", 1966 raw_smp_processor_id()); 1967 BUG_ON(efx->type->monitor == NULL); 1968 1969 /* If the mac_lock is already held then it is likely a port 1970 * reconfiguration is already in place, which will likely do 1971 * most of the work of monitor() anyway. */ 1972 if (mutex_trylock(&efx->mac_lock)) { 1973 if (efx->port_enabled) 1974 efx->type->monitor(efx); 1975 mutex_unlock(&efx->mac_lock); 1976 } 1977 1978 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1979 ef4_monitor_interval); 1980 } 1981 1982 /************************************************************************** 1983 * 1984 * ioctls 1985 * 1986 *************************************************************************/ 1987 1988 /* Net device ioctl 1989 * Context: process, rtnl_lock() held. 1990 */ 1991 static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 1992 { 1993 struct ef4_nic *efx = netdev_priv(net_dev); 1994 struct mii_ioctl_data *data = if_mii(ifr); 1995 1996 /* Convert phy_id from older PRTAD/DEVAD format */ 1997 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 1998 (data->phy_id & 0xfc00) == 0x0400) 1999 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; 2000 2001 return mdio_mii_ioctl(&efx->mdio, data, cmd); 2002 } 2003 2004 /************************************************************************** 2005 * 2006 * NAPI interface 2007 * 2008 **************************************************************************/ 2009 2010 static void ef4_init_napi_channel(struct ef4_channel *channel) 2011 { 2012 struct ef4_nic *efx = channel->efx; 2013 2014 channel->napi_dev = efx->net_dev; 2015 netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll); 2016 } 2017 2018 static void ef4_init_napi(struct ef4_nic *efx) 2019 { 2020 struct ef4_channel *channel; 2021 2022 ef4_for_each_channel(channel, efx) 2023 ef4_init_napi_channel(channel); 2024 } 2025 2026 static void ef4_fini_napi_channel(struct ef4_channel *channel) 2027 { 2028 if (channel->napi_dev) 2029 netif_napi_del(&channel->napi_str); 2030 2031 channel->napi_dev = NULL; 2032 } 2033 2034 static void ef4_fini_napi(struct ef4_nic *efx) 2035 { 2036 struct ef4_channel *channel; 2037 2038 ef4_for_each_channel(channel, efx) 2039 ef4_fini_napi_channel(channel); 2040 } 2041 2042 /************************************************************************** 2043 * 2044 * Kernel net device interface 2045 * 2046 *************************************************************************/ 2047 2048 /* Context: process, rtnl_lock() held. 
*/ 2049 int ef4_net_open(struct net_device *net_dev) 2050 { 2051 struct ef4_nic *efx = netdev_priv(net_dev); 2052 int rc; 2053 2054 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 2055 raw_smp_processor_id()); 2056 2057 rc = ef4_check_disabled(efx); 2058 if (rc) 2059 return rc; 2060 if (efx->phy_mode & PHY_MODE_SPECIAL) 2061 return -EBUSY; 2062 2063 /* Notify the kernel of the link state polled during driver load, 2064 * before the monitor starts running */ 2065 ef4_link_status_changed(efx); 2066 2067 ef4_start_all(efx); 2068 ef4_selftest_async_start(efx); 2069 return 0; 2070 } 2071 2072 /* Context: process, rtnl_lock() held. 2073 * Note that the kernel will ignore our return code; this method 2074 * should really be a void. 2075 */ 2076 int ef4_net_stop(struct net_device *net_dev) 2077 { 2078 struct ef4_nic *efx = netdev_priv(net_dev); 2079 2080 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 2081 raw_smp_processor_id()); 2082 2083 /* Stop the device and flush all the channels */ 2084 ef4_stop_all(efx); 2085 2086 return 0; 2087 } 2088 2089 /* Context: process, dev_base_lock or RTNL held, non-blocking. */ 2090 static void ef4_net_stats(struct net_device *net_dev, 2091 struct rtnl_link_stats64 *stats) 2092 { 2093 struct ef4_nic *efx = netdev_priv(net_dev); 2094 2095 spin_lock_bh(&efx->stats_lock); 2096 efx->type->update_stats(efx, NULL, stats); 2097 spin_unlock_bh(&efx->stats_lock); 2098 } 2099 2100 /* Context: netif_tx_lock held, BHs disabled. */ 2101 static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue) 2102 { 2103 struct ef4_nic *efx = netdev_priv(net_dev); 2104 2105 netif_err(efx, tx_err, efx->net_dev, 2106 "TX stuck with port_enabled=%d: resetting channels\n", 2107 efx->port_enabled); 2108 2109 ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 2110 } 2111 2112 2113 /* Context: process, rtnl_lock() held. */ 2114 static int ef4_change_mtu(struct net_device *net_dev, int new_mtu) 2115 { 2116 struct ef4_nic *efx = netdev_priv(net_dev); 2117 int rc; 2118 2119 rc = ef4_check_disabled(efx); 2120 if (rc) 2121 return rc; 2122 2123 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 2124 2125 ef4_device_detach_sync(efx); 2126 ef4_stop_all(efx); 2127 2128 mutex_lock(&efx->mac_lock); 2129 net_dev->mtu = new_mtu; 2130 ef4_mac_reconfigure(efx); 2131 mutex_unlock(&efx->mac_lock); 2132 2133 ef4_start_all(efx); 2134 netif_device_attach(efx->net_dev); 2135 return 0; 2136 } 2137 2138 static int ef4_set_mac_address(struct net_device *net_dev, void *data) 2139 { 2140 struct ef4_nic *efx = netdev_priv(net_dev); 2141 struct sockaddr *addr = data; 2142 u8 *new_addr = addr->sa_data; 2143 u8 old_addr[6]; 2144 int rc; 2145 2146 if (!is_valid_ether_addr(new_addr)) { 2147 netif_err(efx, drv, efx->net_dev, 2148 "invalid ethernet MAC address requested: %pM\n", 2149 new_addr); 2150 return -EADDRNOTAVAIL; 2151 } 2152 2153 /* save old address */ 2154 ether_addr_copy(old_addr, net_dev->dev_addr); 2155 eth_hw_addr_set(net_dev, new_addr); 2156 if (efx->type->set_mac_address) { 2157 rc = efx->type->set_mac_address(efx); 2158 if (rc) { 2159 eth_hw_addr_set(net_dev, old_addr); 2160 return rc; 2161 } 2162 } 2163 2164 /* Reconfigure the MAC */ 2165 mutex_lock(&efx->mac_lock); 2166 ef4_mac_reconfigure(efx); 2167 mutex_unlock(&efx->mac_lock); 2168 2169 return 0; 2170 } 2171 2172 /* Context: netif_addr_lock held, BHs disabled. 
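 * The filter/MAC update may sleep, so it is deferred to the MAC work item
 * rather than done in this atomic context.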
*/ 2173 static void ef4_set_rx_mode(struct net_device *net_dev) 2174 { 2175 struct ef4_nic *efx = netdev_priv(net_dev); 2176 2177 if (efx->port_enabled) 2178 queue_work(efx->workqueue, &efx->mac_work); 2179 /* Otherwise ef4_start_port() will do this */ 2180 } 2181 2182 static int ef4_set_features(struct net_device *net_dev, netdev_features_t data) 2183 { 2184 struct ef4_nic *efx = netdev_priv(net_dev); 2185 int rc; 2186 2187 /* If disabling RX n-tuple filtering, clear existing filters */ 2188 if (net_dev->features & ~data & NETIF_F_NTUPLE) { 2189 rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL); 2190 if (rc) 2191 return rc; 2192 } 2193 2194 /* If the RX VLAN filter is changed, update filters via mac_reconfigure */ 2195 if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) { 2196 /* ef4_set_rx_mode() will schedule MAC work to update filters 2197 * when the new features are finally set in net_dev. 2198 */ 2199 ef4_set_rx_mode(net_dev); 2200 } 2201 2202 return 0; 2203 } 2204 2205 static const struct net_device_ops ef4_netdev_ops = { 2206 .ndo_open = ef4_net_open, 2207 .ndo_stop = ef4_net_stop, 2208 .ndo_get_stats64 = ef4_net_stats, 2209 .ndo_tx_timeout = ef4_watchdog, 2210 .ndo_start_xmit = ef4_hard_start_xmit, 2211 .ndo_validate_addr = eth_validate_addr, 2212 .ndo_eth_ioctl = ef4_ioctl, 2213 .ndo_change_mtu = ef4_change_mtu, 2214 .ndo_set_mac_address = ef4_set_mac_address, 2215 .ndo_set_rx_mode = ef4_set_rx_mode, 2216 .ndo_set_features = ef4_set_features, 2217 .ndo_setup_tc = ef4_setup_tc, 2218 #ifdef CONFIG_RFS_ACCEL 2219 .ndo_rx_flow_steer = ef4_filter_rfs, 2220 #endif 2221 }; 2222 2223 static void ef4_update_name(struct ef4_nic *efx) 2224 { 2225 strcpy(efx->name, efx->net_dev->name); 2226 ef4_mtd_rename(efx); 2227 ef4_set_channel_names(efx); 2228 } 2229 2230 static int ef4_netdev_event(struct notifier_block *this, 2231 unsigned long event, void *ptr) 2232 { 2233 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 2234 2235 if ((net_dev->netdev_ops == &ef4_netdev_ops) && 2236 event == NETDEV_CHANGENAME) 2237 ef4_update_name(netdev_priv(net_dev)); 2238 2239 return NOTIFY_DONE; 2240 } 2241 2242 static struct notifier_block ef4_netdev_notifier = { 2243 .notifier_call = ef4_netdev_event, 2244 }; 2245 2246 static ssize_t 2247 phy_type_show(struct device *dev, struct device_attribute *attr, char *buf) 2248 { 2249 struct ef4_nic *efx = dev_get_drvdata(dev); 2250 return sprintf(buf, "%d\n", efx->phy_type); 2251 } 2252 static DEVICE_ATTR_RO(phy_type); 2253 2254 static int ef4_register_netdev(struct ef4_nic *efx) 2255 { 2256 struct net_device *net_dev = efx->net_dev; 2257 struct ef4_channel *channel; 2258 int rc; 2259 2260 net_dev->watchdog_timeo = 5 * HZ; 2261 net_dev->irq = efx->pci_dev->irq; 2262 net_dev->netdev_ops = &ef4_netdev_ops; 2263 net_dev->ethtool_ops = &ef4_ethtool_ops; 2264 netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS); 2265 net_dev->min_mtu = EF4_MIN_MTU; 2266 net_dev->max_mtu = EF4_MAX_MTU; 2267 2268 rtnl_lock(); 2269 2270 /* Enable resets to be scheduled and check whether any were 2271 * already requested. If so, the NIC is probably hosed so we 2272 * abort.
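 * (The smp_mb() below pairs with the one in ef4_schedule_reset().)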
2273 */ 2274 efx->state = STATE_READY; 2275 smp_mb(); /* ensure we change state before checking reset_pending */ 2276 if (efx->reset_pending) { 2277 netif_err(efx, probe, efx->net_dev, 2278 "aborting probe due to scheduled reset\n"); 2279 rc = -EIO; 2280 goto fail_locked; 2281 } 2282 2283 rc = dev_alloc_name(net_dev, net_dev->name); 2284 if (rc < 0) 2285 goto fail_locked; 2286 ef4_update_name(efx); 2287 2288 /* Always start with carrier off; PHY events will detect the link */ 2289 netif_carrier_off(net_dev); 2290 2291 rc = register_netdevice(net_dev); 2292 if (rc) 2293 goto fail_locked; 2294 2295 ef4_for_each_channel(channel, efx) { 2296 struct ef4_tx_queue *tx_queue; 2297 ef4_for_each_channel_tx_queue(tx_queue, channel) 2298 ef4_init_tx_queue_core_txq(tx_queue); 2299 } 2300 2301 ef4_associate(efx); 2302 2303 rtnl_unlock(); 2304 2305 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2306 if (rc) { 2307 netif_err(efx, drv, efx->net_dev, 2308 "failed to init net dev attributes\n"); 2309 goto fail_registered; 2310 } 2311 return 0; 2312 2313 fail_registered: 2314 rtnl_lock(); 2315 ef4_dissociate(efx); 2316 unregister_netdevice(net_dev); 2317 fail_locked: 2318 efx->state = STATE_UNINIT; 2319 rtnl_unlock(); 2320 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 2321 return rc; 2322 } 2323 2324 static void ef4_unregister_netdev(struct ef4_nic *efx) 2325 { 2326 if (!efx->net_dev) 2327 return; 2328 2329 BUG_ON(netdev_priv(efx->net_dev) != efx); 2330 2331 if (ef4_dev_registered(efx)) { 2332 strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2333 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2334 unregister_netdev(efx->net_dev); 2335 } 2336 } 2337 2338 /************************************************************************** 2339 * 2340 * Device reset and suspend 2341 * 2342 **************************************************************************/ 2343 2344 /* Tears down the entire software state and most of the hardware state 2345 * before reset. */ 2346 void ef4_reset_down(struct ef4_nic *efx, enum reset_type method) 2347 { 2348 EF4_ASSERT_RESET_SERIALISED(efx); 2349 2350 ef4_stop_all(efx); 2351 ef4_disable_interrupts(efx); 2352 2353 mutex_lock(&efx->mac_lock); 2354 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2355 method != RESET_TYPE_DATAPATH) 2356 efx->phy_op->fini(efx); 2357 efx->type->fini(efx); 2358 } 2359 2360 /* This function will always ensure that the locks acquired in 2361 * ef4_reset_down() are released. A failure return code indicates 2362 * that we were unable to reinitialise the hardware, and the 2363 * driver should be disabled. If ok is false, then the rx and tx 2364 * engines are not restarted, pending a RESET_DISABLE. 
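 * (The lock in question is the mac_lock, which is dropped on both the
 * success and failure paths below.)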
*/ 2365 int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok) 2366 { 2367 int rc; 2368 2369 EF4_ASSERT_RESET_SERIALISED(efx); 2370 2371 /* Ensure that SRAM is initialised even if we're disabling the device */ 2372 rc = efx->type->init(efx); 2373 if (rc) { 2374 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); 2375 goto fail; 2376 } 2377 2378 if (!ok) 2379 goto fail; 2380 2381 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2382 method != RESET_TYPE_DATAPATH) { 2383 rc = efx->phy_op->init(efx); 2384 if (rc) 2385 goto fail; 2386 rc = efx->phy_op->reconfigure(efx); 2387 if (rc && rc != -EPERM) 2388 netif_err(efx, drv, efx->net_dev, 2389 "could not restore PHY settings\n"); 2390 } 2391 2392 rc = ef4_enable_interrupts(efx); 2393 if (rc) 2394 goto fail; 2395 2396 down_read(&efx->filter_sem); 2397 ef4_restore_filters(efx); 2398 up_read(&efx->filter_sem); 2399 2400 mutex_unlock(&efx->mac_lock); 2401 2402 ef4_start_all(efx); 2403 2404 return 0; 2405 2406 fail: 2407 efx->port_initialized = false; 2408 2409 mutex_unlock(&efx->mac_lock); 2410 2411 return rc; 2412 } 2413 2414 /* Reset the NIC using the specified method. Note that the reset may 2415 * fail, in which case the card will be left in an unusable state. 2416 * 2417 * Caller must hold the rtnl_lock. 2418 */ 2419 int ef4_reset(struct ef4_nic *efx, enum reset_type method) 2420 { 2421 int rc, rc2; 2422 bool disabled; 2423 2424 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", 2425 RESET_TYPE(method)); 2426 2427 ef4_device_detach_sync(efx); 2428 ef4_reset_down(efx, method); 2429 2430 rc = efx->type->reset(efx, method); 2431 if (rc) { 2432 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); 2433 goto out; 2434 } 2435 2436 /* Clear flags for the scopes we covered. We assume the NIC and 2437 * driver are now quiescent so that there is no race here. 2438 */ 2439 if (method < RESET_TYPE_MAX_METHOD) 2440 efx->reset_pending &= -(1 << (method + 1)); 2441 else /* it doesn't fit into the well-ordered scope hierarchy */ 2442 __clear_bit(method, &efx->reset_pending); 2443 2444 /* Reinitialise bus-mastering, which may have been turned off before 2445 * the reset was scheduled. This is still appropriate, even in the 2446 * RESET_TYPE_DISABLE since this driver generally assumes the hardware 2447 * can respond to requests. */ 2448 pci_set_master(efx->pci_dev); 2449 2450 out: 2451 /* Leave device stopped if necessary */ 2452 disabled = rc || 2453 method == RESET_TYPE_DISABLE || 2454 method == RESET_TYPE_RECOVER_OR_DISABLE; 2455 rc2 = ef4_reset_up(efx, method, !disabled); 2456 if (rc2) { 2457 disabled = true; 2458 if (!rc) 2459 rc = rc2; 2460 } 2461 2462 if (disabled) { 2463 dev_close(efx->net_dev); 2464 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); 2465 efx->state = STATE_DISABLED; 2466 } else { 2467 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); 2468 netif_device_attach(efx->net_dev); 2469 } 2470 return rc; 2471 } 2472 2473 /* Try recovery mechanisms. 2474 * For now only EEH is supported. 2475 * Returns 0 if the recovery mechanisms are unsuccessful. 2476 * Returns a non-zero value otherwise. 2477 */ 2478 int ef4_try_recovery(struct ef4_nic *efx) 2479 { 2480 #ifdef CONFIG_EEH 2481 /* A PCI error can occur and not be seen by EEH because nothing 2482 * happens on the PCI bus. In this case the driver may fail and 2483 * schedule a 'recover or reset', leading to this recovery handler. 2484 * Manually call the eeh failure check function. 
2485 */ 2486 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); 2487 if (eeh_dev_check_failure(eehdev)) { 2488 /* The EEH mechanisms will handle the error and reset the 2489 * device if necessary. 2490 */ 2491 return 1; 2492 } 2493 #endif 2494 return 0; 2495 } 2496 2497 /* The worker thread exists so that code that cannot sleep can 2498 * schedule a reset for later. 2499 */ 2500 static void ef4_reset_work(struct work_struct *data) 2501 { 2502 struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work); 2503 unsigned long pending; 2504 enum reset_type method; 2505 2506 pending = READ_ONCE(efx->reset_pending); 2507 method = fls(pending) - 1; 2508 2509 if ((method == RESET_TYPE_RECOVER_OR_DISABLE || 2510 method == RESET_TYPE_RECOVER_OR_ALL) && 2511 ef4_try_recovery(efx)) 2512 return; 2513 2514 if (!pending) 2515 return; 2516 2517 rtnl_lock(); 2518 2519 /* We checked the state in ef4_schedule_reset() but it may 2520 * have changed by now. Now that we have the RTNL lock, 2521 * it cannot change again. 2522 */ 2523 if (efx->state == STATE_READY) 2524 (void)ef4_reset(efx, method); 2525 2526 rtnl_unlock(); 2527 } 2528 2529 void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type) 2530 { 2531 enum reset_type method; 2532 2533 if (efx->state == STATE_RECOVERY) { 2534 netif_dbg(efx, drv, efx->net_dev, 2535 "recovering: skip scheduling %s reset\n", 2536 RESET_TYPE(type)); 2537 return; 2538 } 2539 2540 switch (type) { 2541 case RESET_TYPE_INVISIBLE: 2542 case RESET_TYPE_ALL: 2543 case RESET_TYPE_RECOVER_OR_ALL: 2544 case RESET_TYPE_WORLD: 2545 case RESET_TYPE_DISABLE: 2546 case RESET_TYPE_RECOVER_OR_DISABLE: 2547 case RESET_TYPE_DATAPATH: 2548 method = type; 2549 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2550 RESET_TYPE(method)); 2551 break; 2552 default: 2553 method = efx->type->map_reset_reason(type); 2554 netif_dbg(efx, drv, efx->net_dev, 2555 "scheduling %s reset for %s\n", 2556 RESET_TYPE(method), RESET_TYPE(type)); 2557 break; 2558 } 2559 2560 set_bit(method, &efx->reset_pending); 2561 smp_mb(); /* ensure we change reset_pending before checking state */ 2562 2563 /* If we're not READY then just leave the flags set as the cue 2564 * to abort probing or reschedule the reset later. 
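 * (The smp_mb() above pairs with the one in ef4_register_netdev().)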
2565 */ 2566 if (READ_ONCE(efx->state) != STATE_READY) 2567 return; 2568 2569 queue_work(reset_workqueue, &efx->reset_work); 2570 } 2571 2572 /************************************************************************** 2573 * 2574 * List of NICs we support 2575 * 2576 **************************************************************************/ 2577 2578 /* PCI device ID table */ 2579 static const struct pci_device_id ef4_pci_table[] = { 2580 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2581 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), 2582 .driver_data = (unsigned long) &falcon_a1_nic_type}, 2583 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2584 PCI_DEVICE_ID_SOLARFLARE_SFC4000B), 2585 .driver_data = (unsigned long) &falcon_b0_nic_type}, 2586 {0} /* end of list */ 2587 }; 2588 2589 /************************************************************************** 2590 * 2591 * Dummy PHY/MAC operations 2592 * 2593 * Can be used for some unimplemented operations 2594 * Needed so all function pointers are valid and do not have to be tested 2595 * before use 2596 * 2597 **************************************************************************/ 2598 int ef4_port_dummy_op_int(struct ef4_nic *efx) 2599 { 2600 return 0; 2601 } 2602 void ef4_port_dummy_op_void(struct ef4_nic *efx) {} 2603 2604 static bool ef4_port_dummy_op_poll(struct ef4_nic *efx) 2605 { 2606 return false; 2607 } 2608 2609 static const struct ef4_phy_operations ef4_dummy_phy_operations = { 2610 .init = ef4_port_dummy_op_int, 2611 .reconfigure = ef4_port_dummy_op_int, 2612 .poll = ef4_port_dummy_op_poll, 2613 .fini = ef4_port_dummy_op_void, 2614 }; 2615 2616 /************************************************************************** 2617 * 2618 * Data housekeeping 2619 * 2620 **************************************************************************/ 2621 2622 /* This zeroes out and then fills in the invariants in a struct 2623 * ef4_nic (including all sub-structures). 2624 */ 2625 static int ef4_init_struct(struct ef4_nic *efx, 2626 struct pci_dev *pci_dev, struct net_device *net_dev) 2627 { 2628 int i; 2629 2630 /* Initialise common structures */ 2631 INIT_LIST_HEAD(&efx->node); 2632 INIT_LIST_HEAD(&efx->secondary_list); 2633 spin_lock_init(&efx->biu_lock); 2634 #ifdef CONFIG_SFC_FALCON_MTD 2635 INIT_LIST_HEAD(&efx->mtd_list); 2636 #endif 2637 INIT_WORK(&efx->reset_work, ef4_reset_work); 2638 INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor); 2639 INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work); 2640 efx->pci_dev = pci_dev; 2641 efx->msg_enable = debug; 2642 efx->state = STATE_UNINIT; 2643 strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2644 2645 efx->net_dev = net_dev; 2646 efx->rx_prefix_size = efx->type->rx_prefix_size; 2647 efx->rx_ip_align = 2648 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; 2649 efx->rx_packet_hash_offset = 2650 efx->type->rx_hash_offset - efx->type->rx_prefix_size; 2651 efx->rx_packet_ts_offset = 2652 efx->type->rx_ts_offset - efx->type->rx_prefix_size; 2653 spin_lock_init(&efx->stats_lock); 2654 mutex_init(&efx->mac_lock); 2655 efx->phy_op = &ef4_dummy_phy_operations; 2656 efx->mdio.dev = net_dev; 2657 INIT_WORK(&efx->mac_work, ef4_mac_work); 2658 init_waitqueue_head(&efx->flush_wq); 2659 2660 for (i = 0; i < EF4_MAX_CHANNELS; i++) { 2661 efx->channel[i] = ef4_alloc_channel(efx, i, NULL); 2662 if (!efx->channel[i]) 2663 goto fail; 2664 efx->msi_context[i].efx = efx; 2665 efx->msi_context[i].index = i; 2666 } 2667 2668 /* Higher numbered interrupt modes are less capable! 
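 * max() therefore picks the higher-numbered (less capable) of the requested
 * mode and the best mode this NIC type supports.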
*/ 2669 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 2670 interrupt_mode); 2671 2672 /* Would be good to use the net_dev name, but we're too early */ 2673 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", 2674 pci_name(pci_dev)); 2675 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); 2676 if (!efx->workqueue) 2677 goto fail; 2678 2679 return 0; 2680 2681 fail: 2682 ef4_fini_struct(efx); 2683 return -ENOMEM; 2684 } 2685 2686 static void ef4_fini_struct(struct ef4_nic *efx) 2687 { 2688 int i; 2689 2690 for (i = 0; i < EF4_MAX_CHANNELS; i++) 2691 kfree(efx->channel[i]); 2692 2693 kfree(efx->vpd_sn); 2694 2695 if (efx->workqueue) { 2696 destroy_workqueue(efx->workqueue); 2697 efx->workqueue = NULL; 2698 } 2699 } 2700 2701 void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats) 2702 { 2703 u64 n_rx_nodesc_trunc = 0; 2704 struct ef4_channel *channel; 2705 2706 ef4_for_each_channel(channel, efx) 2707 n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; 2708 stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; 2709 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); 2710 } 2711 2712 /************************************************************************** 2713 * 2714 * PCI interface 2715 * 2716 **************************************************************************/ 2717 2718 /* Main body of final NIC shutdown code 2719 * This is called only at module unload (or hotplug removal). 2720 */ 2721 static void ef4_pci_remove_main(struct ef4_nic *efx) 2722 { 2723 /* Flush reset_work. It can no longer be scheduled since we 2724 * are not READY. 2725 */ 2726 BUG_ON(efx->state == STATE_READY); 2727 cancel_work_sync(&efx->reset_work); 2728 2729 ef4_disable_interrupts(efx); 2730 ef4_nic_fini_interrupt(efx); 2731 ef4_fini_port(efx); 2732 efx->type->fini(efx); 2733 ef4_fini_napi(efx); 2734 ef4_remove_all(efx); 2735 } 2736 2737 /* Final NIC shutdown 2738 * This is called only at module unload (or hotplug removal). A PF can call 2739 * this on its VFs to ensure they are unbound first. 2740 */ 2741 static void ef4_pci_remove(struct pci_dev *pci_dev) 2742 { 2743 struct ef4_nic *efx; 2744 2745 efx = pci_get_drvdata(pci_dev); 2746 if (!efx) 2747 return; 2748 2749 /* Mark the NIC as fini, then stop the interface */ 2750 rtnl_lock(); 2751 ef4_dissociate(efx); 2752 dev_close(efx->net_dev); 2753 ef4_disable_interrupts(efx); 2754 efx->state = STATE_UNINIT; 2755 rtnl_unlock(); 2756 2757 ef4_unregister_netdev(efx); 2758 2759 ef4_mtd_remove(efx); 2760 2761 ef4_pci_remove_main(efx); 2762 2763 ef4_fini_io(efx); 2764 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 2765 2766 ef4_fini_struct(efx); 2767 free_netdev(efx->net_dev); 2768 2769 pci_disable_pcie_error_reporting(pci_dev); 2770 }; 2771 2772 /* NIC VPD information 2773 * Called during probe to display the part number of the installed NIC. 
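 * The serial number keyword is also read here and kept in efx->vpd_sn.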
2774 */ 2775 static void ef4_probe_vpd_strings(struct ef4_nic *efx) 2776 { 2777 struct pci_dev *dev = efx->pci_dev; 2778 unsigned int vpd_size, kw_len; 2779 u8 *vpd_data; 2780 int start; 2781 2782 vpd_data = pci_vpd_alloc(dev, &vpd_size); 2783 if (IS_ERR(vpd_data)) { 2784 pci_warn(dev, "Unable to read VPD\n"); 2785 return; 2786 } 2787 2788 start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 2789 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 2790 if (start < 0) 2791 pci_warn(dev, "Part number not found or incomplete\n"); 2792 else 2793 pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start); 2794 2795 start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 2796 PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); 2797 if (start < 0) 2798 pci_warn(dev, "Serial number not found or incomplete\n"); 2799 else 2800 efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL); 2801 2802 kfree(vpd_data); 2803 } 2804 2805 2806 /* Main body of NIC initialisation 2807 * This is called at module load (or hotplug insertion, theoretically). 2808 */ 2809 static int ef4_pci_probe_main(struct ef4_nic *efx) 2810 { 2811 int rc; 2812 2813 /* Do start-of-day initialisation */ 2814 rc = ef4_probe_all(efx); 2815 if (rc) 2816 goto fail1; 2817 2818 ef4_init_napi(efx); 2819 2820 rc = efx->type->init(efx); 2821 if (rc) { 2822 netif_err(efx, probe, efx->net_dev, 2823 "failed to initialise NIC\n"); 2824 goto fail3; 2825 } 2826 2827 rc = ef4_init_port(efx); 2828 if (rc) { 2829 netif_err(efx, probe, efx->net_dev, 2830 "failed to initialise port\n"); 2831 goto fail4; 2832 } 2833 2834 rc = ef4_nic_init_interrupt(efx); 2835 if (rc) 2836 goto fail5; 2837 rc = ef4_enable_interrupts(efx); 2838 if (rc) 2839 goto fail6; 2840 2841 return 0; 2842 2843 fail6: 2844 ef4_nic_fini_interrupt(efx); 2845 fail5: 2846 ef4_fini_port(efx); 2847 fail4: 2848 efx->type->fini(efx); 2849 fail3: 2850 ef4_fini_napi(efx); 2851 ef4_remove_all(efx); 2852 fail1: 2853 return rc; 2854 } 2855 2856 /* NIC initialisation 2857 * 2858 * This is called at module load (or hotplug insertion, 2859 * theoretically). It sets up PCI mappings, resets the NIC, 2860 * sets up and registers the network devices with the kernel and hooks 2861 * the interrupt service routine. It does not prepare the device for 2862 * transmission; this is left to the first time one of the network 2863 * interfaces is brought up (i.e. ef4_net_open). 
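 * On failure, everything set up so far is torn down in reverse order via
 * the fail labels.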
2864 */ 2865 static int ef4_pci_probe(struct pci_dev *pci_dev, 2866 const struct pci_device_id *entry) 2867 { 2868 struct net_device *net_dev; 2869 struct ef4_nic *efx; 2870 int rc; 2871 2872 /* Allocate and initialise a struct net_device and struct ef4_nic */ 2873 net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES, 2874 EF4_MAX_RX_QUEUES); 2875 if (!net_dev) 2876 return -ENOMEM; 2877 efx = netdev_priv(net_dev); 2878 efx->type = (const struct ef4_nic_type *) entry->driver_data; 2879 efx->fixed_features |= NETIF_F_HIGHDMA; 2880 2881 pci_set_drvdata(pci_dev, efx); 2882 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2883 rc = ef4_init_struct(efx, pci_dev, net_dev); 2884 if (rc) 2885 goto fail1; 2886 2887 netif_info(efx, probe, efx->net_dev, 2888 "Solarflare NIC detected\n"); 2889 2890 ef4_probe_vpd_strings(efx); 2891 2892 /* Set up basic I/O (BAR mappings etc) */ 2893 rc = ef4_init_io(efx); 2894 if (rc) 2895 goto fail2; 2896 2897 rc = ef4_pci_probe_main(efx); 2898 if (rc) 2899 goto fail3; 2900 2901 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | 2902 NETIF_F_RXCSUM); 2903 /* Mask for features that also apply to VLAN devices */ 2904 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | 2905 NETIF_F_HIGHDMA | NETIF_F_RXCSUM); 2906 2907 net_dev->hw_features = net_dev->features & ~efx->fixed_features; 2908 2909 /* Disable VLAN filtering by default. It may be enforced if 2910 * the feature is fixed (i.e. VLAN filters are required to 2911 * receive VLAN tagged packets due to vPort restrictions). 2912 */ 2913 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2914 net_dev->features |= efx->fixed_features; 2915 2916 rc = ef4_register_netdev(efx); 2917 if (rc) 2918 goto fail4; 2919 2920 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 2921 2922 /* Try to create MTDs, but allow this to fail */ 2923 rtnl_lock(); 2924 rc = ef4_mtd_probe(efx); 2925 rtnl_unlock(); 2926 if (rc && rc != -EPERM) 2927 netif_warn(efx, probe, efx->net_dev, 2928 "failed to create MTDs (%d)\n", rc); 2929 2930 rc = pci_enable_pcie_error_reporting(pci_dev); 2931 if (rc && rc != -EINVAL) 2932 netif_notice(efx, probe, efx->net_dev, 2933 "PCIE error reporting unavailable (%d).\n", 2934 rc); 2935 2936 return 0; 2937 2938 fail4: 2939 ef4_pci_remove_main(efx); 2940 fail3: 2941 ef4_fini_io(efx); 2942 fail2: 2943 ef4_fini_struct(efx); 2944 fail1: 2945 WARN_ON(rc > 0); 2946 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); 2947 free_netdev(net_dev); 2948 return rc; 2949 } 2950 2951 static int ef4_pm_freeze(struct device *dev) 2952 { 2953 struct ef4_nic *efx = dev_get_drvdata(dev); 2954 2955 rtnl_lock(); 2956 2957 if (efx->state != STATE_DISABLED) { 2958 efx->state = STATE_UNINIT; 2959 2960 ef4_device_detach_sync(efx); 2961 2962 ef4_stop_all(efx); 2963 ef4_disable_interrupts(efx); 2964 } 2965 2966 rtnl_unlock(); 2967 2968 return 0; 2969 } 2970 2971 static int ef4_pm_thaw(struct device *dev) 2972 { 2973 int rc; 2974 struct ef4_nic *efx = dev_get_drvdata(dev); 2975 2976 rtnl_lock(); 2977 2978 if (efx->state != STATE_DISABLED) { 2979 rc = ef4_enable_interrupts(efx); 2980 if (rc) 2981 goto fail; 2982 2983 mutex_lock(&efx->mac_lock); 2984 efx->phy_op->reconfigure(efx); 2985 mutex_unlock(&efx->mac_lock); 2986 2987 ef4_start_all(efx); 2988 2989 netif_device_attach(efx->net_dev); 2990 2991 efx->state = STATE_READY; 2992 2993 efx->type->resume_wol(efx); 2994 } 2995 2996 rtnl_unlock(); 2997 2998 /* Reschedule any quenched resets scheduled during ef4_pm_freeze() */ 2999 queue_work(reset_workqueue, &efx->reset_work); 3000 3001 return 0; 3002 3003 fail: 3004 rtnl_unlock(); 3005 3006 return rc; 3007 } 3008 3009 static int ef4_pm_poweroff(struct device *dev) 3010 { 3011 struct pci_dev *pci_dev = to_pci_dev(dev); 3012 struct ef4_nic *efx = pci_get_drvdata(pci_dev); 3013 3014 efx->type->fini(efx); 3015 3016 efx->reset_pending = 0; 3017 3018 pci_save_state(pci_dev); 3019 return pci_set_power_state(pci_dev, PCI_D3hot); 3020 } 3021 3022 /* Used for both resume and restore */ 3023 static int ef4_pm_resume(struct device *dev) 3024 { 3025 struct pci_dev *pci_dev = to_pci_dev(dev); 3026 struct ef4_nic *efx = pci_get_drvdata(pci_dev); 3027 int rc; 3028 3029 rc = pci_set_power_state(pci_dev, PCI_D0); 3030 if (rc) 3031 return rc; 3032 pci_restore_state(pci_dev); 3033 rc = pci_enable_device(pci_dev); 3034 if (rc) 3035 return rc; 3036 pci_set_master(efx->pci_dev); 3037 rc = efx->type->reset(efx, RESET_TYPE_ALL); 3038 if (rc) 3039 return rc; 3040 rc = efx->type->init(efx); 3041 if (rc) 3042 return rc; 3043 rc = ef4_pm_thaw(dev); 3044 return rc; 3045 } 3046 3047 static int ef4_pm_suspend(struct device *dev) 3048 { 3049 int rc; 3050 3051 ef4_pm_freeze(dev); 3052 rc = ef4_pm_poweroff(dev); 3053 if (rc) 3054 ef4_pm_resume(dev); 3055 return rc; 3056 } 3057 3058 static const struct dev_pm_ops ef4_pm_ops = { 3059 .suspend = ef4_pm_suspend, 3060 .resume = ef4_pm_resume, 3061 .freeze = ef4_pm_freeze, 3062 .thaw = ef4_pm_thaw, 3063 .poweroff = ef4_pm_poweroff, 3064 .restore = ef4_pm_resume, 3065 }; 3066 3067 /* A PCI error affecting this device was detected. 3068 * At this point MMIO and DMA may be disabled. 3069 * Stop the software path and request a slot reset. 3070 */ 3071 static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev, 3072 pci_channel_state_t state) 3073 { 3074 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3075 struct ef4_nic *efx = pci_get_drvdata(pdev); 3076 3077 if (state == pci_channel_io_perm_failure) 3078 return PCI_ERS_RESULT_DISCONNECT; 3079 3080 rtnl_lock(); 3081 3082 if (efx->state != STATE_DISABLED) { 3083 efx->state = STATE_RECOVERY; 3084 efx->reset_pending = 0; 3085 3086 ef4_device_detach_sync(efx); 3087 3088 ef4_stop_all(efx); 3089 ef4_disable_interrupts(efx); 3090 3091 status = PCI_ERS_RESULT_NEED_RESET; 3092 } else { 3093 /* If the interface is disabled we don't want to do anything 3094 * with it. 
3095 */ 3096 status = PCI_ERS_RESULT_RECOVERED; 3097 } 3098 3099 rtnl_unlock(); 3100 3101 pci_disable_device(pdev); 3102 3103 return status; 3104 } 3105 3106 /* Fake a successful reset, which will be performed later in ef4_io_resume. */ 3107 static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev) 3108 { 3109 struct ef4_nic *efx = pci_get_drvdata(pdev); 3110 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3111 3112 if (pci_enable_device(pdev)) { 3113 netif_err(efx, hw, efx->net_dev, 3114 "Cannot re-enable PCI device after reset.\n"); 3115 status = PCI_ERS_RESULT_DISCONNECT; 3116 } 3117 3118 return status; 3119 } 3120 3121 /* Perform the actual reset and resume I/O operations. */ 3122 static void ef4_io_resume(struct pci_dev *pdev) 3123 { 3124 struct ef4_nic *efx = pci_get_drvdata(pdev); 3125 int rc; 3126 3127 rtnl_lock(); 3128 3129 if (efx->state == STATE_DISABLED) 3130 goto out; 3131 3132 rc = ef4_reset(efx, RESET_TYPE_ALL); 3133 if (rc) { 3134 netif_err(efx, hw, efx->net_dev, 3135 "ef4_reset failed after PCI error (%d)\n", rc); 3136 } else { 3137 efx->state = STATE_READY; 3138 netif_dbg(efx, hw, efx->net_dev, 3139 "Done resetting and resuming IO after PCI error.\n"); 3140 } 3141 3142 out: 3143 rtnl_unlock(); 3144 } 3145 3146 /* For simplicity and reliability, we always require a slot reset and try to 3147 * reset the hardware when a pci error affecting the device is detected. 3148 * We leave both the link_reset and mmio_enabled callback unimplemented: 3149 * with our request for slot reset the mmio_enabled callback will never be 3150 * called, and the link_reset callback is not used by AER or EEH mechanisms. 3151 */ 3152 static const struct pci_error_handlers ef4_err_handlers = { 3153 .error_detected = ef4_io_error_detected, 3154 .slot_reset = ef4_io_slot_reset, 3155 .resume = ef4_io_resume, 3156 }; 3157 3158 static struct pci_driver ef4_pci_driver = { 3159 .name = KBUILD_MODNAME, 3160 .id_table = ef4_pci_table, 3161 .probe = ef4_pci_probe, 3162 .remove = ef4_pci_remove, 3163 .driver.pm = &ef4_pm_ops, 3164 .err_handler = &ef4_err_handlers, 3165 }; 3166 3167 /************************************************************************** 3168 * 3169 * Kernel module interface 3170 * 3171 *************************************************************************/ 3172 3173 module_param(interrupt_mode, uint, 0444); 3174 MODULE_PARM_DESC(interrupt_mode, 3175 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 3176 3177 static int __init ef4_init_module(void) 3178 { 3179 int rc; 3180 3181 printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n"); 3182 3183 rc = register_netdevice_notifier(&ef4_netdev_notifier); 3184 if (rc) 3185 goto err_notifier; 3186 3187 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 3188 if (!reset_workqueue) { 3189 rc = -ENOMEM; 3190 goto err_reset; 3191 } 3192 3193 rc = pci_register_driver(&ef4_pci_driver); 3194 if (rc < 0) 3195 goto err_pci; 3196 3197 return 0; 3198 3199 err_pci: 3200 destroy_workqueue(reset_workqueue); 3201 err_reset: 3202 unregister_netdevice_notifier(&ef4_netdev_notifier); 3203 err_notifier: 3204 return rc; 3205 } 3206 3207 static void __exit ef4_exit_module(void) 3208 { 3209 printk(KERN_INFO "Solarflare Falcon driver unloading\n"); 3210 3211 pci_unregister_driver(&ef4_pci_driver); 3212 destroy_workqueue(reset_workqueue); 3213 unregister_netdevice_notifier(&ef4_netdev_notifier); 3214 3215 } 3216 3217 module_init(ef4_init_module); 3218 module_exit(ef4_exit_module); 3219 3220 MODULE_AUTHOR("Solarflare Communications and " 
3221 "Michael Brown <mbrown@fensystems.co.uk>"); 3222 MODULE_DESCRIPTION("Solarflare Falcon network driver"); 3223 MODULE_LICENSE("GPL"); 3224 MODULE_DEVICE_TABLE(pci, ef4_pci_table); 3225 MODULE_VERSION(EF4_DRIVER_VERSION); 3226