// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"

#include "mcdi.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE] = "NONE",
	[LOOPBACK_DATA] = "DATAPATH",
	[LOOPBACK_GMAC] = "GMAC",
	[LOOPBACK_XGMII] = "XGMII",
	[LOOPBACK_XGXS] = "XGXS",
	[LOOPBACK_XAUI] = "XAUI",
	[LOOPBACK_GMII] = "GMII",
	[LOOPBACK_SGMII] = "SGMII",
	[LOOPBACK_XGBR] = "XGBR",
	[LOOPBACK_XFI] = "XFI",
	[LOOPBACK_XAUI_FAR] = "XAUI_FAR",
	[LOOPBACK_GMII_FAR] = "GMII_FAR",
	[LOOPBACK_SGMII_FAR] = "SGMII_FAR",
	[LOOPBACK_XFI_FAR] = "XFI_FAR",
	[LOOPBACK_GPHY] = "GPHY",
	[LOOPBACK_PHYXS] = "PHYXS",
	[LOOPBACK_PCS] = "PCS",
	[LOOPBACK_PMAPMD] = "PMA/PMD",
	[LOOPBACK_XPORT] = "XPORT",
	[LOOPBACK_XGMII_WS] = "XGMII_WS",
	[LOOPBACK_XAUI_WS] = "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS] = "GMII_WS",
	[LOOPBACK_XFI_WS] = "XFI_WS",
	[LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS] = "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE] = "INVISIBLE",
	[RESET_TYPE_ALL] = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD] = "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH] = "DATAPATH",
	[RESET_TYPE_MC_BIST] = "MC_BIST",
	[RESET_TYPE_DISABLE] = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR] = "INT_ERROR",
	[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
	[RESET_TYPE_TX_SKIP] = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
};

/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
};

void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
{
	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
	    efx_udp_tunnel_type_names[type] != NULL)
		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
	else
		snprintf(buf, buflen, "type %d", type);
}
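/* Usage sketch (illustrative, not from a real caller): the buffer is
 * always left printable, so the result can be logged directly.
 *
 *	char tname[16];
 *
 *	efx_get_udp_tunnel_type_name(TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN,
 *				     tname, sizeof(tname));
 *	// tname now holds "vxlan"; an unrecognised value such as 7
 *	// would yield "type 7".
 */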
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete
 * (100 polls at 100ms each allow up to 10 seconds in total).
 */
#define BIST_WAIT_DELAY_MS 100
#define BIST_WAIT_DELAY_COUNT 100

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
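/* Example (illustrative): both parameters above can be set at module
 * load time, e.g.
 *
 *	modprobe sfc efx_separate_tx_channels=1 rss_cpus=4
 *
 * which requests dedicated TX channels and limits RSS to four CPUs.
 */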
static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		schedule_work(&channel->filter_work);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}
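/* Adaptive moderation summary: efx_poll() re-evaluates moderation once
 * per 1000 interrupts on a channel; efx_update_irq_mod() then steps
 * irq_moderation_us down by irq_mod_step_us if the event score fell
 * below irq_adapt_low_thresh, or up (bounded by irq_rx_moderation_us)
 * if it exceeded irq_adapt_high_thresh, before resetting both counters.
 */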
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions
	 * (e.g. the default 1024-entry RX and TX rings give 2176 entries,
	 * rounded up to 4096). */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}
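/* Event queue lifecycle: probe -> init -> start -> stop -> fini -> remove.
 * probe/remove manage the DMA memory, init/fini program the hardware,
 * and start/stop gate NAPI processing. efx_start_datapath() bounces
 * stop/start around the initial RX descriptor push so the NAPI handler
 * stays quiescent while the ring is filled.
 */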
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
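/* Naming example (illustrative): with efx_separate_tx_channels set, an
 * interface "eth0" with 4 RX and 4 TX channels names channels 0-3
 * "eth0-rx-0".."eth0-rx-3" and channels 4-7 "eth0-tx-0".."eth0-tx-3";
 * with shared channels (tx_channel_offset == 0) they are simply
 * "eth0-0".."eth0-3".
 */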
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
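	/* Sizing example (illustrative): with a standard 1500-byte MTU the
	 * computed rx_buf_len fits in one page, so order-0 buffers are used
	 * and scatter stays off unless the hardware always scatters; with a
	 * jumbo MTU that overflows PAGE_SIZE, a scatter-capable NIC falls
	 * back to EFX_RX_USR_BUF_SIZE fragments while other NICs allocate
	 * higher-order buffers instead.
	 */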
	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
	efx->xdp_rxq_info_failed = false;
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}
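/* efx_realloc_channels() resizes the rings by cloning every resizable
 * channel, swapping the clones in, and probing them with the new entry
 * counts; if anything fails the original channels are swapped back, so
 * the device remains usable with its old ring sizes.
 */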
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe = efx_channel_dummy_op_int,
	.post_remove = efx_channel_dummy_op_void,
	.get_name = efx_get_channel_name,
	.copy = efx_copy_channel,
	.want_txqs = efx_default_channel_want_txqs,
	.keep_eventq = false,
	.want_pio = true,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; the carrier state in turn
 * determines whether the networking core keeps the port's TX queue
 * stopped.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx,
			      const unsigned long *advertising)
{
	memcpy(efx->link_advertising, advertising,
	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));

	efx->link_advertising[0] |= ADVERTISED_Autoneg;
	if (advertising[0] & ADVERTISED_Pause)
		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
	else
		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
	if (advertising[0] & ADVERTISED_Asym_Pause)
		efx->wanted_fc ^= EFX_FC_TX;
}

/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
 * force the Autoneg bit on.
 */
void efx_link_clear_advertising(struct efx_nic *efx)
{
	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising[0]) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising[0] |= (ADVERTISED_Pause |
						     ADVERTISED_Asym_Pause);
		else
			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
						      ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
	}
}
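/* The bit juggling above implements the standard 802.3 pause
 * advertising encoding; the XOR flips Asym_Pause relative to the bits
 * set for EFX_FC_RX:
 *
 *	wanted_fc	Pause	Asym_Pause
 *	RX | TX		  1	    0
 *	RX only		  1	    1
 *	TX only		  0	    1
 *	none		  0	    0
 *
 * efx_link_set_advertising() applies the inverse mapping.
 */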
static void efx_fini_port(struct efx_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void efx_mac_reconfigure(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly.
 */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	efx_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}
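/* port_enabled is checked under mac_lock by efx_mac_work() above, so
 * once efx_stop_port() clears it (below) no further MAC reconfiguration
 * can run against a stopping port.
 */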
/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
	return left->type == right->type &&
	       left->vpd_sn && right->vpd_sn &&
	       !strcmp(left->vpd_sn, right->vpd_sn);
}

static void efx_associate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &efx_primary_list);

		list_for_each_entry_safe(other, next, &efx_unassociated_list,
					 node) {
			if (efx_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &efx_primary_list, node) {
			if (efx_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &efx_unassociated_list);
	}
}

static void efx_dissociate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &efx_unassociated_list);
		other->primary = NULL;
	}
}
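/* Functions on the same controller report the same VPD serial number,
 * which efx_same_controller() uses to group e.g. both ports of a
 * dual-port board: secondary functions wait on the unassociated list
 * until their primary function is probed.
 */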
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar(efx);

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our genuine mask
	 * down to 32 bits, because some architectures will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, bar);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar(efx);
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}
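/* ethtool_rxfh_indir_default() spreads entries round-robin (index %
 * rss_spread), so with rss_spread == 4 the table reads 0,1,2,3,0,1,...
 * and hashed RX flows land evenly across the first four RX queues.
 */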
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
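/* Example (illustrative): on an 8-CPU machine with 2-way SMT,
 * topology_sibling_cpumask() pairs the two hyperthreads of each core,
 * so the loop above counts 4 and one channel is created per physical
 * core rather than per logical CPU.
 */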
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (efx_separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (efx_separate_tx_channels) {
				efx->n_tx_channels = min(max(n_channels / 2,
							     1U),
							 efx->max_tx_channels);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = min(n_channels,
							 efx->max_tx_channels);
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	efx->n_extra_tx_channels = 0;
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((efx->n_rx_channels > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   efx->n_rx_channels : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = efx->n_rx_channels;

	return 0;
}

#if defined(CONFIG_SMP)
static void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = cpumask_local_spread(channel->channel,
					   pcibus_to_node(efx->pci_dev->bus));
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

static void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
static void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

static void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */
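/* Interrupt bring-up is layered: efx_enable_interrupts() below first
 * initialises event queues for channels that keep them across resets,
 * then efx_soft_enable_interrupts() handles the remaining channels and
 * switches MCDI into event-driven mode; teardown mirrors this in
 * reverse order.
 */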
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}

static int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

static void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}
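/* Renumbering example (illustrative): with efx_separate_tx_channels,
 * 8 channels and 4 TX channels, tx_channel_offset is 4; channel 4's
 * queues, allocated starting at 4 * EFX_TXQ_TYPES, are renumbered to
 * start at 0 so the kernel's TX queue indices remain dense.
 */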
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	do {
		if (!efx->max_channels || !efx->max_tx_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "Insufficient resources to allocate"
				  " any channels\n");
			rc = -ENOSPC;
			goto fail1;
		}

		/* Determine the number of channels and queues by trying
		 * to hook in MSI-X interrupts.
		 */
		rc = efx_probe_interrupts(efx);
		if (rc)
			goto fail1;

		efx_set_channels(efx);

		/* dimension_resources can fail with EAGAIN */
		rc = efx->type->dimension_resources(efx);
		if (rc != 0 && rc != -EAGAIN)
			goto fail2;

		if (rc == -EAGAIN)
			/* try again with new max_channels */
			efx_remove_interrupts(efx);

	} while (rc == -EAGAIN);

	if (efx->n_channels > 1)
		netdev_rss_key_fill(efx->rss_context.rx_hash_key,
				    sizeof(efx->rss_context.rx_hash_key));
	efx_set_default_rx_indir_table(efx, &efx->rss_context);

	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	efx_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

static int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	init_rwsem(&efx->filter_sem);
	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}

		efx->rps_expire_index = efx->rps_expire_channel = 0;
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		kfree(channel->rps_flow_id);
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}
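/* Lock ordering note: efx_probe_filters() takes mac_lock before
 * filter_sem for write, while efx_mac_reconfigure() takes filter_sem
 * for read under callers already holding mac_lock, so the order is
 * consistently mac_lock -> filter_sem.
 */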
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_probe(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to setup vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail5;

	return 0;

fail5:
	efx_remove_filters(efx);
fail4:
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
fail3:
	efx_remove_port(efx);
fail2:
	efx_remove_nic(efx);
fail1:
	return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx->phy_op->poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->type->start_stats(efx);
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
}
2020 */ 2021 WARN_ON(netif_running(efx->net_dev) && 2022 netif_device_present(efx->net_dev)); 2023 netif_tx_disable(efx->net_dev); 2024 2025 efx_stop_datapath(efx); 2026 } 2027 2028 static void efx_remove_all(struct efx_nic *efx) 2029 { 2030 rtnl_lock(); 2031 efx_xdp_setup_prog(efx, NULL); 2032 rtnl_unlock(); 2033 2034 efx_remove_channels(efx); 2035 efx_remove_filters(efx); 2036 #ifdef CONFIG_SFC_SRIOV 2037 efx->type->vswitching_remove(efx); 2038 #endif 2039 efx_remove_port(efx); 2040 efx_remove_nic(efx); 2041 } 2042 2043 /************************************************************************** 2044 * 2045 * Interrupt moderation 2046 * 2047 **************************************************************************/ 2048 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) 2049 { 2050 if (usecs == 0) 2051 return 0; 2052 if (usecs * 1000 < efx->timer_quantum_ns) 2053 return 1; /* never round down to 0 */ 2054 return usecs * 1000 / efx->timer_quantum_ns; 2055 } 2056 2057 unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) 2058 { 2059 /* We must round up when converting ticks to microseconds 2060 * because we round down when converting the other way. 2061 */ 2062 return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); 2063 } 2064 2065 /* Set interrupt moderation parameters */ 2066 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, 2067 unsigned int rx_usecs, bool rx_adaptive, 2068 bool rx_may_override_tx) 2069 { 2070 struct efx_channel *channel; 2071 unsigned int timer_max_us; 2072 2073 EFX_ASSERT_RESET_SERIALISED(efx); 2074 2075 timer_max_us = efx->timer_max_ns / 1000; 2076 2077 if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) 2078 return -EINVAL; 2079 2080 if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && 2081 !rx_may_override_tx) { 2082 netif_err(efx, drv, efx->net_dev, "Channels are shared. " 2083 "RX and TX IRQ moderation must be equal\n"); 2084 return -EINVAL; 2085 } 2086 2087 efx->irq_rx_adaptive = rx_adaptive; 2088 efx->irq_rx_moderation_us = rx_usecs; 2089 efx_for_each_channel(channel, efx) { 2090 if (efx_channel_has_rx_queue(channel)) 2091 channel->irq_moderation_us = rx_usecs; 2092 else if (efx_channel_has_tx_queues(channel)) 2093 channel->irq_moderation_us = tx_usecs; 2094 } 2095 2096 return 0; 2097 } 2098 2099 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, 2100 unsigned int *rx_usecs, bool *rx_adaptive) 2101 { 2102 *rx_adaptive = efx->irq_rx_adaptive; 2103 *rx_usecs = efx->irq_rx_moderation_us; 2104 2105 /* If channels are shared between RX and TX, so is IRQ 2106 * moderation. Otherwise, IRQ moderation is the same for all 2107 * TX channels and is not adaptive. 
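 *
 * Example: with tx_channel_offset == 0 a channel carries both RX and
 * TX, so the reported *tx_usecs simply mirrors *rx_usecs; with a
 * non-zero offset (say 4, an illustrative value) channel[4] is the
 * first dedicated TX channel and its irq_moderation_us is reported.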
2108 */ 2109 if (efx->tx_channel_offset == 0) { 2110 *tx_usecs = *rx_usecs; 2111 } else { 2112 struct efx_channel *tx_channel; 2113 2114 tx_channel = efx->channel[efx->tx_channel_offset]; 2115 *tx_usecs = tx_channel->irq_moderation_us; 2116 } 2117 } 2118 2119 /************************************************************************** 2120 * 2121 * Hardware monitor 2122 * 2123 **************************************************************************/ 2124 2125 /* Run periodically off the general workqueue */ 2126 static void efx_monitor(struct work_struct *data) 2127 { 2128 struct efx_nic *efx = container_of(data, struct efx_nic, 2129 monitor_work.work); 2130 2131 netif_vdbg(efx, timer, efx->net_dev, 2132 "hardware monitor executing on CPU %d\n", 2133 raw_smp_processor_id()); 2134 BUG_ON(efx->type->monitor == NULL); 2135 2136 /* If the mac_lock is already held then it is likely a port 2137 * reconfiguration is already in place, which will likely do 2138 * most of the work of monitor() anyway. */ 2139 if (mutex_trylock(&efx->mac_lock)) { 2140 if (efx->port_enabled) 2141 efx->type->monitor(efx); 2142 mutex_unlock(&efx->mac_lock); 2143 } 2144 2145 queue_delayed_work(efx->workqueue, &efx->monitor_work, 2146 efx_monitor_interval); 2147 } 2148 2149 /************************************************************************** 2150 * 2151 * ioctls 2152 * 2153 *************************************************************************/ 2154 2155 /* Net device ioctl 2156 * Context: process, rtnl_lock() held. 2157 */ 2158 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 2159 { 2160 struct efx_nic *efx = netdev_priv(net_dev); 2161 struct mii_ioctl_data *data = if_mii(ifr); 2162 2163 if (cmd == SIOCSHWTSTAMP) 2164 return efx_ptp_set_ts_config(efx, ifr); 2165 if (cmd == SIOCGHWTSTAMP) 2166 return efx_ptp_get_ts_config(efx, ifr); 2167 2168 /* Convert phy_id from older PRTAD/DEVAD format */ 2169 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 2170 (data->phy_id & 0xfc00) == 0x0400) 2171 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; 2172 2173 return mdio_mii_ioctl(&efx->mdio, data, cmd); 2174 } 2175 2176 /************************************************************************** 2177 * 2178 * NAPI interface 2179 * 2180 **************************************************************************/ 2181 2182 static void efx_init_napi_channel(struct efx_channel *channel) 2183 { 2184 struct efx_nic *efx = channel->efx; 2185 2186 channel->napi_dev = efx->net_dev; 2187 netif_napi_add(channel->napi_dev, &channel->napi_str, 2188 efx_poll, napi_weight); 2189 } 2190 2191 static void efx_init_napi(struct efx_nic *efx) 2192 { 2193 struct efx_channel *channel; 2194 2195 efx_for_each_channel(channel, efx) 2196 efx_init_napi_channel(channel); 2197 } 2198 2199 static void efx_fini_napi_channel(struct efx_channel *channel) 2200 { 2201 if (channel->napi_dev) 2202 netif_napi_del(&channel->napi_str); 2203 2204 channel->napi_dev = NULL; 2205 } 2206 2207 static void efx_fini_napi(struct efx_nic *efx) 2208 { 2209 struct efx_channel *channel; 2210 2211 efx_for_each_channel(channel, efx) 2212 efx_fini_napi_channel(channel); 2213 } 2214 2215 /************************************************************************** 2216 * 2217 * Kernel net device interface 2218 * 2219 *************************************************************************/ 2220 2221 /* Context: process, rtnl_lock() held. 
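 * Reached through ndo_open (see efx_netdev_ops below), e.g. when
 * userspace runs "ip link set <ifname> up" (an illustrative command).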
*/ 2222 int efx_net_open(struct net_device *net_dev) 2223 { 2224 struct efx_nic *efx = netdev_priv(net_dev); 2225 int rc; 2226 2227 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 2228 raw_smp_processor_id()); 2229 2230 rc = efx_check_disabled(efx); 2231 if (rc) 2232 return rc; 2233 if (efx->phy_mode & PHY_MODE_SPECIAL) 2234 return -EBUSY; 2235 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 2236 return -EIO; 2237 2238 /* Notify the kernel of the link state polled during driver load, 2239 * before the monitor starts running */ 2240 efx_link_status_changed(efx); 2241 2242 efx_start_all(efx); 2243 if (efx->state == STATE_DISABLED || efx->reset_pending) 2244 netif_device_detach(efx->net_dev); 2245 efx_selftest_async_start(efx); 2246 return 0; 2247 } 2248 2249 /* Context: process, rtnl_lock() held. 2250 * Note that the kernel will ignore our return code; this method 2251 * should really be a void. 2252 */ 2253 int efx_net_stop(struct net_device *net_dev) 2254 { 2255 struct efx_nic *efx = netdev_priv(net_dev); 2256 2257 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 2258 raw_smp_processor_id()); 2259 2260 /* Stop the device and flush all the channels */ 2261 efx_stop_all(efx); 2262 2263 return 0; 2264 } 2265 2266 /* Context: process, dev_base_lock or RTNL held, non-blocking. */ 2267 static void efx_net_stats(struct net_device *net_dev, 2268 struct rtnl_link_stats64 *stats) 2269 { 2270 struct efx_nic *efx = netdev_priv(net_dev); 2271 2272 spin_lock_bh(&efx->stats_lock); 2273 efx->type->update_stats(efx, NULL, stats); 2274 spin_unlock_bh(&efx->stats_lock); 2275 } 2276 2277 /* Context: netif_tx_lock held, BHs disabled. */ 2278 static void efx_watchdog(struct net_device *net_dev) 2279 { 2280 struct efx_nic *efx = netdev_priv(net_dev); 2281 2282 netif_err(efx, tx_err, efx->net_dev, 2283 "TX stuck with port_enabled=%d: resetting channels\n", 2284 efx->port_enabled); 2285 2286 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 2287 } 2288 2289 static unsigned int efx_xdp_max_mtu(struct efx_nic *efx) 2290 { 2291 /* The maximum MTU that we can fit in a single page, allowing for 2292 * framing, overhead and XDP headroom. 2293 */ 2294 int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) + 2295 efx->rx_prefix_size + efx->type->rx_buffer_padding + 2296 efx->rx_ip_align + XDP_PACKET_HEADROOM; 2297 2298 return PAGE_SIZE - overhead; 2299 } 2300 2301 /* Context: process, rtnl_lock() held. 
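 * Reached through ndo_change_mtu (see efx_netdev_ops below), e.g.
 * "ip link set <ifname> mtu 9000" (an illustrative command); note
 * the XDP size restriction checked first in the body.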
*/
2302 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
2303 {
2304 struct efx_nic *efx = netdev_priv(net_dev);
2305 int rc;
2306
2307 rc = efx_check_disabled(efx);
2308 if (rc)
2309 return rc;
2310
2311 if (rtnl_dereference(efx->xdp_prog) &&
2312 new_mtu > efx_xdp_max_mtu(efx)) {
2313 netif_err(efx, drv, efx->net_dev,
2314 "Requested MTU of %d too big for XDP (max: %d)\n",
2315 new_mtu, efx_xdp_max_mtu(efx));
2316 return -EINVAL;
2317 }
2318
2319 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2320
2321 efx_device_detach_sync(efx);
2322 efx_stop_all(efx);
2323
2324 mutex_lock(&efx->mac_lock);
2325 net_dev->mtu = new_mtu;
2326 efx_mac_reconfigure(efx);
2327 mutex_unlock(&efx->mac_lock);
2328
2329 efx_start_all(efx);
2330 efx_device_attach_if_not_resetting(efx);
2331 return 0;
2332 }
2333
2334 static int efx_set_mac_address(struct net_device *net_dev, void *data)
2335 {
2336 struct efx_nic *efx = netdev_priv(net_dev);
2337 struct sockaddr *addr = data;
2338 u8 *new_addr = addr->sa_data;
2339 u8 old_addr[ETH_ALEN];
2340 int rc;
2341
2342 if (!is_valid_ether_addr(new_addr)) {
2343 netif_err(efx, drv, efx->net_dev,
2344 "invalid ethernet MAC address requested: %pM\n",
2345 new_addr);
2346 return -EADDRNOTAVAIL;
2347 }
2348
2349 /* save old address */
2350 ether_addr_copy(old_addr, net_dev->dev_addr);
2351 ether_addr_copy(net_dev->dev_addr, new_addr);
2352 if (efx->type->set_mac_address) {
2353 rc = efx->type->set_mac_address(efx);
2354 if (rc) {
2355 ether_addr_copy(net_dev->dev_addr, old_addr);
2356 return rc;
2357 }
2358 }
2359
2360 /* Reconfigure the MAC */
2361 mutex_lock(&efx->mac_lock);
2362 efx_mac_reconfigure(efx);
2363 mutex_unlock(&efx->mac_lock);
2364
2365 return 0;
2366 }
2367
2368 /* Context: netif_addr_lock held, BHs disabled. */
2369 static void efx_set_rx_mode(struct net_device *net_dev)
2370 {
2371 struct efx_nic *efx = netdev_priv(net_dev);
2372
2373 if (efx->port_enabled)
2374 queue_work(efx->workqueue, &efx->mac_work);
2375 /* Otherwise efx_start_port() will do this */
2376 }
2377
2378 static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2379 {
2380 struct efx_nic *efx = netdev_priv(net_dev);
2381 int rc;
2382
2383 /* If disabling RX n-tuple filtering, clear existing filters */
2384 if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2385 rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2386 if (rc)
2387 return rc;
2388 }
2389
2390 /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
2391 * If rx-fcs is changed, mac_reconfigure updates that too.
2392 */
2393 if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
2394 NETIF_F_RXFCS)) {
2395 /* efx_set_rx_mode() will schedule MAC work to update filters
2396 * when the new features are finally set in net_dev.
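 * (The core commits the new set to net_dev->features only after
 * this ndo_set_features hook returns 0, so the deferred MAC work
 * observes the updated features.)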
2397 */ 2398 efx_set_rx_mode(net_dev); 2399 } 2400 2401 return 0; 2402 } 2403 2404 static int efx_get_phys_port_id(struct net_device *net_dev, 2405 struct netdev_phys_item_id *ppid) 2406 { 2407 struct efx_nic *efx = netdev_priv(net_dev); 2408 2409 if (efx->type->get_phys_port_id) 2410 return efx->type->get_phys_port_id(efx, ppid); 2411 else 2412 return -EOPNOTSUPP; 2413 } 2414 2415 static int efx_get_phys_port_name(struct net_device *net_dev, 2416 char *name, size_t len) 2417 { 2418 struct efx_nic *efx = netdev_priv(net_dev); 2419 2420 if (snprintf(name, len, "p%u", efx->port_num) >= len) 2421 return -EINVAL; 2422 return 0; 2423 } 2424 2425 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) 2426 { 2427 struct efx_nic *efx = netdev_priv(net_dev); 2428 2429 if (efx->type->vlan_rx_add_vid) 2430 return efx->type->vlan_rx_add_vid(efx, proto, vid); 2431 else 2432 return -EOPNOTSUPP; 2433 } 2434 2435 static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) 2436 { 2437 struct efx_nic *efx = netdev_priv(net_dev); 2438 2439 if (efx->type->vlan_rx_kill_vid) 2440 return efx->type->vlan_rx_kill_vid(efx, proto, vid); 2441 else 2442 return -EOPNOTSUPP; 2443 } 2444 2445 static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in) 2446 { 2447 switch (in) { 2448 case UDP_TUNNEL_TYPE_VXLAN: 2449 return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; 2450 case UDP_TUNNEL_TYPE_GENEVE: 2451 return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; 2452 default: 2453 return -1; 2454 } 2455 } 2456 2457 static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) 2458 { 2459 struct efx_nic *efx = netdev_priv(dev); 2460 struct efx_udp_tunnel tnl; 2461 int efx_tunnel_type; 2462 2463 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); 2464 if (efx_tunnel_type < 0) 2465 return; 2466 2467 tnl.type = (u16)efx_tunnel_type; 2468 tnl.port = ti->port; 2469 2470 if (efx->type->udp_tnl_add_port) 2471 (void)efx->type->udp_tnl_add_port(efx, tnl); 2472 } 2473 2474 static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) 2475 { 2476 struct efx_nic *efx = netdev_priv(dev); 2477 struct efx_udp_tunnel tnl; 2478 int efx_tunnel_type; 2479 2480 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); 2481 if (efx_tunnel_type < 0) 2482 return; 2483 2484 tnl.type = (u16)efx_tunnel_type; 2485 tnl.port = ti->port; 2486 2487 if (efx->type->udp_tnl_del_port) 2488 (void)efx->type->udp_tnl_del_port(efx, tnl); 2489 } 2490 2491 static const struct net_device_ops efx_netdev_ops = { 2492 .ndo_open = efx_net_open, 2493 .ndo_stop = efx_net_stop, 2494 .ndo_get_stats64 = efx_net_stats, 2495 .ndo_tx_timeout = efx_watchdog, 2496 .ndo_start_xmit = efx_hard_start_xmit, 2497 .ndo_validate_addr = eth_validate_addr, 2498 .ndo_do_ioctl = efx_ioctl, 2499 .ndo_change_mtu = efx_change_mtu, 2500 .ndo_set_mac_address = efx_set_mac_address, 2501 .ndo_set_rx_mode = efx_set_rx_mode, 2502 .ndo_set_features = efx_set_features, 2503 .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, 2504 .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, 2505 #ifdef CONFIG_SFC_SRIOV 2506 .ndo_set_vf_mac = efx_sriov_set_vf_mac, 2507 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, 2508 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, 2509 .ndo_get_vf_config = efx_sriov_get_vf_config, 2510 .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, 2511 #endif 2512 .ndo_get_phys_port_id = efx_get_phys_port_id, 2513 .ndo_get_phys_port_name = efx_get_phys_port_name, 2514 .ndo_setup_tc = efx_setup_tc, 2515 #ifdef CONFIG_RFS_ACCEL 2516 
.ndo_rx_flow_steer = efx_filter_rfs, 2517 #endif 2518 .ndo_udp_tunnel_add = efx_udp_tunnel_add, 2519 .ndo_udp_tunnel_del = efx_udp_tunnel_del, 2520 .ndo_bpf = efx_xdp 2521 }; 2522 2523 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) 2524 { 2525 struct bpf_prog *old_prog; 2526 2527 if (efx->xdp_rxq_info_failed) { 2528 netif_err(efx, drv, efx->net_dev, 2529 "Unable to bind XDP program due to previous failure of rxq_info\n"); 2530 return -EINVAL; 2531 } 2532 2533 if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) { 2534 netif_err(efx, drv, efx->net_dev, 2535 "Unable to configure XDP with MTU of %d (max: %d)\n", 2536 efx->net_dev->mtu, efx_xdp_max_mtu(efx)); 2537 return -EINVAL; 2538 } 2539 2540 old_prog = rtnl_dereference(efx->xdp_prog); 2541 rcu_assign_pointer(efx->xdp_prog, prog); 2542 /* Release the reference that was originally passed by the caller. */ 2543 if (old_prog) 2544 bpf_prog_put(old_prog); 2545 2546 return 0; 2547 } 2548 2549 /* Context: process, rtnl_lock() held. */ 2550 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2551 { 2552 struct efx_nic *efx = netdev_priv(dev); 2553 struct bpf_prog *xdp_prog; 2554 2555 switch (xdp->command) { 2556 case XDP_SETUP_PROG: 2557 return efx_xdp_setup_prog(efx, xdp->prog); 2558 case XDP_QUERY_PROG: 2559 xdp_prog = rtnl_dereference(efx->xdp_prog); 2560 xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0; 2561 return 0; 2562 default: 2563 return -EINVAL; 2564 } 2565 } 2566 2567 static void efx_update_name(struct efx_nic *efx) 2568 { 2569 strcpy(efx->name, efx->net_dev->name); 2570 efx_mtd_rename(efx); 2571 efx_set_channel_names(efx); 2572 } 2573 2574 static int efx_netdev_event(struct notifier_block *this, 2575 unsigned long event, void *ptr) 2576 { 2577 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 2578 2579 if ((net_dev->netdev_ops == &efx_netdev_ops) && 2580 event == NETDEV_CHANGENAME) 2581 efx_update_name(netdev_priv(net_dev)); 2582 2583 return NOTIFY_DONE; 2584 } 2585 2586 static struct notifier_block efx_netdev_notifier = { 2587 .notifier_call = efx_netdev_event, 2588 }; 2589 2590 static ssize_t 2591 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) 2592 { 2593 struct efx_nic *efx = dev_get_drvdata(dev); 2594 return sprintf(buf, "%d\n", efx->phy_type); 2595 } 2596 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); 2597 2598 #ifdef CONFIG_SFC_MCDI_LOGGING 2599 static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, 2600 char *buf) 2601 { 2602 struct efx_nic *efx = dev_get_drvdata(dev); 2603 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 2604 2605 return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); 2606 } 2607 static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr, 2608 const char *buf, size_t count) 2609 { 2610 struct efx_nic *efx = dev_get_drvdata(dev); 2611 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 2612 bool enable = count > 0 && *buf != '0'; 2613 2614 mcdi->logging_enabled = enable; 2615 return count; 2616 } 2617 static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log); 2618 #endif 2619 2620 static int efx_register_netdev(struct efx_nic *efx) 2621 { 2622 struct net_device *net_dev = efx->net_dev; 2623 struct efx_channel *channel; 2624 int rc; 2625 2626 net_dev->watchdog_timeo = 5 * HZ; 2627 net_dev->irq = efx->pci_dev->irq; 2628 net_dev->netdev_ops = &efx_netdev_ops; 2629 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) 2630 net_dev->priv_flags |= IFF_UNICAST_FLT; 2631 
net_dev->ethtool_ops = &efx_ethtool_ops; 2632 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2633 net_dev->min_mtu = EFX_MIN_MTU; 2634 net_dev->max_mtu = EFX_MAX_MTU; 2635 2636 rtnl_lock(); 2637 2638 /* Enable resets to be scheduled and check whether any were 2639 * already requested. If so, the NIC is probably hosed so we 2640 * abort. 2641 */ 2642 efx->state = STATE_READY; 2643 smp_mb(); /* ensure we change state before checking reset_pending */ 2644 if (efx->reset_pending) { 2645 netif_err(efx, probe, efx->net_dev, 2646 "aborting probe due to scheduled reset\n"); 2647 rc = -EIO; 2648 goto fail_locked; 2649 } 2650 2651 rc = dev_alloc_name(net_dev, net_dev->name); 2652 if (rc < 0) 2653 goto fail_locked; 2654 efx_update_name(efx); 2655 2656 /* Always start with carrier off; PHY events will detect the link */ 2657 netif_carrier_off(net_dev); 2658 2659 rc = register_netdevice(net_dev); 2660 if (rc) 2661 goto fail_locked; 2662 2663 efx_for_each_channel(channel, efx) { 2664 struct efx_tx_queue *tx_queue; 2665 efx_for_each_channel_tx_queue(tx_queue, channel) 2666 efx_init_tx_queue_core_txq(tx_queue); 2667 } 2668 2669 efx_associate(efx); 2670 2671 rtnl_unlock(); 2672 2673 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2674 if (rc) { 2675 netif_err(efx, drv, efx->net_dev, 2676 "failed to init net dev attributes\n"); 2677 goto fail_registered; 2678 } 2679 #ifdef CONFIG_SFC_MCDI_LOGGING 2680 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); 2681 if (rc) { 2682 netif_err(efx, drv, efx->net_dev, 2683 "failed to init net dev attributes\n"); 2684 goto fail_attr_mcdi_logging; 2685 } 2686 #endif 2687 2688 return 0; 2689 2690 #ifdef CONFIG_SFC_MCDI_LOGGING 2691 fail_attr_mcdi_logging: 2692 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2693 #endif 2694 fail_registered: 2695 rtnl_lock(); 2696 efx_dissociate(efx); 2697 unregister_netdevice(net_dev); 2698 fail_locked: 2699 efx->state = STATE_UNINIT; 2700 rtnl_unlock(); 2701 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 2702 return rc; 2703 } 2704 2705 static void efx_unregister_netdev(struct efx_nic *efx) 2706 { 2707 if (!efx->net_dev) 2708 return; 2709 2710 BUG_ON(netdev_priv(efx->net_dev) != efx); 2711 2712 if (efx_dev_registered(efx)) { 2713 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2714 #ifdef CONFIG_SFC_MCDI_LOGGING 2715 device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); 2716 #endif 2717 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2718 unregister_netdev(efx->net_dev); 2719 } 2720 } 2721 2722 /************************************************************************** 2723 * 2724 * Device reset and suspend 2725 * 2726 **************************************************************************/ 2727 2728 /* Tears down the entire software state and most of the hardware state 2729 * before reset. */ 2730 void efx_reset_down(struct efx_nic *efx, enum reset_type method) 2731 { 2732 EFX_ASSERT_RESET_SERIALISED(efx); 2733 2734 if (method == RESET_TYPE_MCDI_TIMEOUT) 2735 efx->type->prepare_flr(efx); 2736 2737 efx_stop_all(efx); 2738 efx_disable_interrupts(efx); 2739 2740 mutex_lock(&efx->mac_lock); 2741 down_write(&efx->filter_sem); 2742 mutex_lock(&efx->rss_lock); 2743 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2744 method != RESET_TYPE_DATAPATH) 2745 efx->phy_op->fini(efx); 2746 efx->type->fini(efx); 2747 } 2748 2749 /* This function will always ensure that the locks acquired in 2750 * efx_reset_down() are released. 
A failure return code indicates 2751 * that we were unable to reinitialise the hardware, and the 2752 * driver should be disabled. If ok is false, then the rx and tx 2753 * engines are not restarted, pending a RESET_DISABLE. */ 2754 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) 2755 { 2756 int rc; 2757 2758 EFX_ASSERT_RESET_SERIALISED(efx); 2759 2760 if (method == RESET_TYPE_MCDI_TIMEOUT) 2761 efx->type->finish_flr(efx); 2762 2763 /* Ensure that SRAM is initialised even if we're disabling the device */ 2764 rc = efx->type->init(efx); 2765 if (rc) { 2766 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); 2767 goto fail; 2768 } 2769 2770 if (!ok) 2771 goto fail; 2772 2773 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2774 method != RESET_TYPE_DATAPATH) { 2775 rc = efx->phy_op->init(efx); 2776 if (rc) 2777 goto fail; 2778 rc = efx->phy_op->reconfigure(efx); 2779 if (rc && rc != -EPERM) 2780 netif_err(efx, drv, efx->net_dev, 2781 "could not restore PHY settings\n"); 2782 } 2783 2784 rc = efx_enable_interrupts(efx); 2785 if (rc) 2786 goto fail; 2787 2788 #ifdef CONFIG_SFC_SRIOV 2789 rc = efx->type->vswitching_restore(efx); 2790 if (rc) /* not fatal; the PF will still work fine */ 2791 netif_warn(efx, probe, efx->net_dev, 2792 "failed to restore vswitching rc=%d;" 2793 " VFs may not function\n", rc); 2794 #endif 2795 2796 if (efx->type->rx_restore_rss_contexts) 2797 efx->type->rx_restore_rss_contexts(efx); 2798 mutex_unlock(&efx->rss_lock); 2799 efx->type->filter_table_restore(efx); 2800 up_write(&efx->filter_sem); 2801 if (efx->type->sriov_reset) 2802 efx->type->sriov_reset(efx); 2803 2804 mutex_unlock(&efx->mac_lock); 2805 2806 efx_start_all(efx); 2807 2808 if (efx->type->udp_tnl_push_ports) 2809 efx->type->udp_tnl_push_ports(efx); 2810 2811 return 0; 2812 2813 fail: 2814 efx->port_initialized = false; 2815 2816 mutex_unlock(&efx->rss_lock); 2817 up_write(&efx->filter_sem); 2818 mutex_unlock(&efx->mac_lock); 2819 2820 return rc; 2821 } 2822 2823 /* Reset the NIC using the specified method. Note that the reset may 2824 * fail, in which case the card will be left in an unusable state. 2825 * 2826 * Caller must hold the rtnl_lock. 2827 */ 2828 int efx_reset(struct efx_nic *efx, enum reset_type method) 2829 { 2830 int rc, rc2; 2831 bool disabled; 2832 2833 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", 2834 RESET_TYPE(method)); 2835 2836 efx_device_detach_sync(efx); 2837 efx_reset_down(efx, method); 2838 2839 rc = efx->type->reset(efx, method); 2840 if (rc) { 2841 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); 2842 goto out; 2843 } 2844 2845 /* Clear flags for the scopes we covered. We assume the NIC and 2846 * driver are now quiescent so that there is no race here. 2847 */ 2848 if (method < RESET_TYPE_MAX_METHOD) 2849 efx->reset_pending &= -(1 << (method + 1)); 2850 else /* it doesn't fit into the well-ordered scope hierarchy */ 2851 __clear_bit(method, &efx->reset_pending); 2852 2853 /* Reinitialise bus-mastering, which may have been turned off before 2854 * the reset was scheduled. This is still appropriate, even in the 2855 * RESET_TYPE_DISABLE since this driver generally assumes the hardware 2856 * can respond to requests. 
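 * (pci_set_master() re-enables the Bus Master bit in the PCI command
 * register, allowing the device to issue DMA and MSI transactions
 * again.)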
*/ 2857 pci_set_master(efx->pci_dev); 2858 2859 out: 2860 /* Leave device stopped if necessary */ 2861 disabled = rc || 2862 method == RESET_TYPE_DISABLE || 2863 method == RESET_TYPE_RECOVER_OR_DISABLE; 2864 rc2 = efx_reset_up(efx, method, !disabled); 2865 if (rc2) { 2866 disabled = true; 2867 if (!rc) 2868 rc = rc2; 2869 } 2870 2871 if (disabled) { 2872 dev_close(efx->net_dev); 2873 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); 2874 efx->state = STATE_DISABLED; 2875 } else { 2876 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); 2877 efx_device_attach_if_not_resetting(efx); 2878 } 2879 return rc; 2880 } 2881 2882 /* Try recovery mechanisms. 2883 * For now only EEH is supported. 2884 * Returns 0 if the recovery mechanisms are unsuccessful. 2885 * Returns a non-zero value otherwise. 2886 */ 2887 int efx_try_recovery(struct efx_nic *efx) 2888 { 2889 #ifdef CONFIG_EEH 2890 /* A PCI error can occur and not be seen by EEH because nothing 2891 * happens on the PCI bus. In this case the driver may fail and 2892 * schedule a 'recover or reset', leading to this recovery handler. 2893 * Manually call the eeh failure check function. 2894 */ 2895 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); 2896 if (eeh_dev_check_failure(eehdev)) { 2897 /* The EEH mechanisms will handle the error and reset the 2898 * device if necessary. 2899 */ 2900 return 1; 2901 } 2902 #endif 2903 return 0; 2904 } 2905 2906 static void efx_wait_for_bist_end(struct efx_nic *efx) 2907 { 2908 int i; 2909 2910 for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { 2911 if (efx_mcdi_poll_reboot(efx)) 2912 goto out; 2913 msleep(BIST_WAIT_DELAY_MS); 2914 } 2915 2916 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); 2917 out: 2918 /* Either way unset the BIST flag. If we found no reboot we probably 2919 * won't recover, but we should try. 2920 */ 2921 efx->mc_bist_for_other_fn = false; 2922 } 2923 2924 /* The worker thread exists so that code that cannot sleep can 2925 * schedule a reset for later. 2926 */ 2927 static void efx_reset_work(struct work_struct *data) 2928 { 2929 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 2930 unsigned long pending; 2931 enum reset_type method; 2932 2933 pending = READ_ONCE(efx->reset_pending); 2934 method = fls(pending) - 1; 2935 2936 if (method == RESET_TYPE_MC_BIST) 2937 efx_wait_for_bist_end(efx); 2938 2939 if ((method == RESET_TYPE_RECOVER_OR_DISABLE || 2940 method == RESET_TYPE_RECOVER_OR_ALL) && 2941 efx_try_recovery(efx)) 2942 return; 2943 2944 if (!pending) 2945 return; 2946 2947 rtnl_lock(); 2948 2949 /* We checked the state in efx_schedule_reset() but it may 2950 * have changed by now. Now that we have the RTNL lock, 2951 * it cannot change again. 
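 * (Every efx->state transition in this file is made under the RTNL
 * lock; see e.g. efx_register_netdev() and efx_pm_freeze().)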
2952 */ 2953 if (efx->state == STATE_READY) 2954 (void)efx_reset(efx, method); 2955 2956 rtnl_unlock(); 2957 } 2958 2959 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) 2960 { 2961 enum reset_type method; 2962 2963 if (efx->state == STATE_RECOVERY) { 2964 netif_dbg(efx, drv, efx->net_dev, 2965 "recovering: skip scheduling %s reset\n", 2966 RESET_TYPE(type)); 2967 return; 2968 } 2969 2970 switch (type) { 2971 case RESET_TYPE_INVISIBLE: 2972 case RESET_TYPE_ALL: 2973 case RESET_TYPE_RECOVER_OR_ALL: 2974 case RESET_TYPE_WORLD: 2975 case RESET_TYPE_DISABLE: 2976 case RESET_TYPE_RECOVER_OR_DISABLE: 2977 case RESET_TYPE_DATAPATH: 2978 case RESET_TYPE_MC_BIST: 2979 case RESET_TYPE_MCDI_TIMEOUT: 2980 method = type; 2981 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2982 RESET_TYPE(method)); 2983 break; 2984 default: 2985 method = efx->type->map_reset_reason(type); 2986 netif_dbg(efx, drv, efx->net_dev, 2987 "scheduling %s reset for %s\n", 2988 RESET_TYPE(method), RESET_TYPE(type)); 2989 break; 2990 } 2991 2992 set_bit(method, &efx->reset_pending); 2993 smp_mb(); /* ensure we change reset_pending before checking state */ 2994 2995 /* If we're not READY then just leave the flags set as the cue 2996 * to abort probing or reschedule the reset later. 2997 */ 2998 if (READ_ONCE(efx->state) != STATE_READY) 2999 return; 3000 3001 /* efx_process_channel() will no longer read events once a 3002 * reset is scheduled. So switch back to poll'd MCDI completions. */ 3003 efx_mcdi_mode_poll(efx); 3004 3005 queue_work(reset_workqueue, &efx->reset_work); 3006 } 3007 3008 /************************************************************************** 3009 * 3010 * List of NICs we support 3011 * 3012 **************************************************************************/ 3013 3014 /* PCI device ID table */ 3015 static const struct pci_device_id efx_pci_table[] = { 3016 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ 3017 .driver_data = (unsigned long) &siena_a0_nic_type}, 3018 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ 3019 .driver_data = (unsigned long) &siena_a0_nic_type}, 3020 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ 3021 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 3022 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ 3023 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 3024 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ 3025 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 3026 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ 3027 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 3028 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ 3029 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 3030 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ 3031 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 3032 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ 3033 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 3034 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ 3035 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 3036 {0} /* end of list */ 3037 }; 3038 3039 /************************************************************************** 3040 * 3041 * Dummy PHY/MAC operations 3042 * 3043 * Can be used for some unimplemented operations 3044 * Needed so all function pointers are valid and do not have to be tested 3045 * before use 3046 * 3047 
**************************************************************************/ 3048 int efx_port_dummy_op_int(struct efx_nic *efx) 3049 { 3050 return 0; 3051 } 3052 void efx_port_dummy_op_void(struct efx_nic *efx) {} 3053 3054 static bool efx_port_dummy_op_poll(struct efx_nic *efx) 3055 { 3056 return false; 3057 } 3058 3059 static const struct efx_phy_operations efx_dummy_phy_operations = { 3060 .init = efx_port_dummy_op_int, 3061 .reconfigure = efx_port_dummy_op_int, 3062 .poll = efx_port_dummy_op_poll, 3063 .fini = efx_port_dummy_op_void, 3064 }; 3065 3066 /************************************************************************** 3067 * 3068 * Data housekeeping 3069 * 3070 **************************************************************************/ 3071 3072 /* This zeroes out and then fills in the invariants in a struct 3073 * efx_nic (including all sub-structures). 3074 */ 3075 static int efx_init_struct(struct efx_nic *efx, 3076 struct pci_dev *pci_dev, struct net_device *net_dev) 3077 { 3078 int rc = -ENOMEM, i; 3079 3080 /* Initialise common structures */ 3081 INIT_LIST_HEAD(&efx->node); 3082 INIT_LIST_HEAD(&efx->secondary_list); 3083 spin_lock_init(&efx->biu_lock); 3084 #ifdef CONFIG_SFC_MTD 3085 INIT_LIST_HEAD(&efx->mtd_list); 3086 #endif 3087 INIT_WORK(&efx->reset_work, efx_reset_work); 3088 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 3089 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); 3090 efx->pci_dev = pci_dev; 3091 efx->msg_enable = debug; 3092 efx->state = STATE_UNINIT; 3093 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 3094 3095 efx->net_dev = net_dev; 3096 efx->rx_prefix_size = efx->type->rx_prefix_size; 3097 efx->rx_ip_align = 3098 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; 3099 efx->rx_packet_hash_offset = 3100 efx->type->rx_hash_offset - efx->type->rx_prefix_size; 3101 efx->rx_packet_ts_offset = 3102 efx->type->rx_ts_offset - efx->type->rx_prefix_size; 3103 INIT_LIST_HEAD(&efx->rss_context.list); 3104 mutex_init(&efx->rss_lock); 3105 spin_lock_init(&efx->stats_lock); 3106 efx->vi_stride = EFX_DEFAULT_VI_STRIDE; 3107 efx->num_mac_stats = MC_CMD_MAC_NSTATS; 3108 BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END); 3109 mutex_init(&efx->mac_lock); 3110 #ifdef CONFIG_RFS_ACCEL 3111 mutex_init(&efx->rps_mutex); 3112 spin_lock_init(&efx->rps_hash_lock); 3113 /* Failure to allocate is not fatal, but may degrade ARFS performance */ 3114 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, 3115 sizeof(*efx->rps_hash_table), GFP_KERNEL); 3116 #endif 3117 efx->phy_op = &efx_dummy_phy_operations; 3118 efx->mdio.dev = net_dev; 3119 INIT_WORK(&efx->mac_work, efx_mac_work); 3120 init_waitqueue_head(&efx->flush_wq); 3121 3122 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 3123 efx->channel[i] = efx_alloc_channel(efx, i, NULL); 3124 if (!efx->channel[i]) 3125 goto fail; 3126 efx->msi_context[i].efx = efx; 3127 efx->msi_context[i].index = i; 3128 } 3129 3130 /* Higher numbered interrupt modes are less capable! 
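 * The max()/min() pair below clamps the interrupt_mode module
 * parameter into the supported range [max_interrupt_mode,
 * min_interrupt_mode]. Worked example (0=>MSI-X, 1=>MSI, 2=>legacy,
 * per the interrupt_mode description): on a type supporting only
 * MSI-X..MSI, a request for legacy (2) is clamped to MSI (1).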
*/
3131 if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
3132 efx->type->min_interrupt_mode)) {
3133 rc = -EIO;
3134 goto fail;
3135 }
3136 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
3137 interrupt_mode);
3138 efx->interrupt_mode = min(efx->type->min_interrupt_mode,
3139 efx->interrupt_mode);
3140
3141 /* Would be good to use the net_dev name, but we're too early */
3142 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
3143 pci_name(pci_dev));
3144 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
3145 if (!efx->workqueue)
3146 goto fail;
3147
3148 return 0;
3149
3150 fail:
3151 efx_fini_struct(efx);
3152 return rc;
3153 }
3154
3155 static void efx_fini_struct(struct efx_nic *efx)
3156 {
3157 int i;
3158
3159 #ifdef CONFIG_RFS_ACCEL
3160 kfree(efx->rps_hash_table);
3161 #endif
3162
3163 for (i = 0; i < EFX_MAX_CHANNELS; i++)
3164 kfree(efx->channel[i]);
3165
3166 kfree(efx->vpd_sn);
3167
3168 if (efx->workqueue) {
3169 destroy_workqueue(efx->workqueue);
3170 efx->workqueue = NULL;
3171 }
3172 }
3173
3174 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3175 {
3176 u64 n_rx_nodesc_trunc = 0;
3177 struct efx_channel *channel;
3178
3179 efx_for_each_channel(channel, efx)
3180 n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
3181 stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
3182 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3183 }
3184
3185 bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3186 const struct efx_filter_spec *right)
3187 {
3188 if ((left->match_flags ^ right->match_flags) |
3189 ((left->flags ^ right->flags) &
3190 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3191 return false;
3192
3193 return memcmp(&left->outer_vid, &right->outer_vid,
3194 sizeof(struct efx_filter_spec) -
3195 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3196 }
3197
3198 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3199 {
3200 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3201 return jhash2((const u32 *)&spec->outer_vid,
3202 (sizeof(struct efx_filter_spec) -
3203 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3204 0);
3205 }
3206
3207 #ifdef CONFIG_RFS_ACCEL
3208 bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3209 bool *force)
3210 {
3211 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3212 /* ARFS is currently updating this entry, leave it */
3213 return false;
3214 }
3215 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3216 /* ARFS tried and failed to update this, so it's probably out
3217 * of date. Remove the filter and the ARFS rule entry.
3218 */
3219 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3220 *force = true;
3221 return true;
3222 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3223 /* ARFS has moved on, so old filter is not needed. Since we did
3224 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3225 * not be removed by efx_rps_hash_del() subsequently.
3226 */
3227 *force = true;
3228 return true;
3229 }
3230 /* Remove it iff ARFS wants to.
*/ 3231 return true; 3232 } 3233 3234 static 3235 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, 3236 const struct efx_filter_spec *spec) 3237 { 3238 u32 hash = efx_filter_spec_hash(spec); 3239 3240 lockdep_assert_held(&efx->rps_hash_lock); 3241 if (!efx->rps_hash_table) 3242 return NULL; 3243 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; 3244 } 3245 3246 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, 3247 const struct efx_filter_spec *spec) 3248 { 3249 struct efx_arfs_rule *rule; 3250 struct hlist_head *head; 3251 struct hlist_node *node; 3252 3253 head = efx_rps_hash_bucket(efx, spec); 3254 if (!head) 3255 return NULL; 3256 hlist_for_each(node, head) { 3257 rule = container_of(node, struct efx_arfs_rule, node); 3258 if (efx_filter_spec_equal(spec, &rule->spec)) 3259 return rule; 3260 } 3261 return NULL; 3262 } 3263 3264 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, 3265 const struct efx_filter_spec *spec, 3266 bool *new) 3267 { 3268 struct efx_arfs_rule *rule; 3269 struct hlist_head *head; 3270 struct hlist_node *node; 3271 3272 head = efx_rps_hash_bucket(efx, spec); 3273 if (!head) 3274 return NULL; 3275 hlist_for_each(node, head) { 3276 rule = container_of(node, struct efx_arfs_rule, node); 3277 if (efx_filter_spec_equal(spec, &rule->spec)) { 3278 *new = false; 3279 return rule; 3280 } 3281 } 3282 rule = kmalloc(sizeof(*rule), GFP_ATOMIC); 3283 *new = true; 3284 if (rule) { 3285 memcpy(&rule->spec, spec, sizeof(rule->spec)); 3286 hlist_add_head(&rule->node, head); 3287 } 3288 return rule; 3289 } 3290 3291 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) 3292 { 3293 struct efx_arfs_rule *rule; 3294 struct hlist_head *head; 3295 struct hlist_node *node; 3296 3297 head = efx_rps_hash_bucket(efx, spec); 3298 if (WARN_ON(!head)) 3299 return; 3300 hlist_for_each(node, head) { 3301 rule = container_of(node, struct efx_arfs_rule, node); 3302 if (efx_filter_spec_equal(spec, &rule->spec)) { 3303 /* Someone already reused the entry. We know that if 3304 * this check doesn't fire (i.e. filter_id == REMOVING) 3305 * then the REMOVING mark was put there by our caller, 3306 * because caller is holding a lock on filter table and 3307 * only holders of that lock set REMOVING. 3308 */ 3309 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) 3310 return; 3311 hlist_del(node); 3312 kfree(rule); 3313 return; 3314 } 3315 } 3316 /* We didn't find it. */ 3317 WARN_ON(1); 3318 } 3319 #endif 3320 3321 /* RSS contexts. We're using linked lists and crappy O(n) algorithms, because 3322 * (a) this is an infrequent control-plane operation and (b) n is small (max 64) 3323 */ 3324 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) 3325 { 3326 struct list_head *head = &efx->rss_context.list; 3327 struct efx_rss_context *ctx, *new; 3328 u32 id = 1; /* Don't use zero, that refers to the master RSS context */ 3329 3330 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 3331 3332 /* Search for first gap in the numbering */ 3333 list_for_each_entry(ctx, head, list) { 3334 if (ctx->user_id != id) 3335 break; 3336 id++; 3337 /* Check for wrap. If this happens, we have nearly 2^32 3338 * allocated RSS contexts, which seems unlikely. 
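 *
 * Gap-search example: with existing user_ids {1, 2, 4} the loop
 * breaks at the entry whose user_id is 4, leaving id == 3; the
 * list_add_tail() below then inserts the new entry just before it.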
3339 */ 3340 if (WARN_ON_ONCE(!id)) 3341 return NULL; 3342 } 3343 3344 /* Create the new entry */ 3345 new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL); 3346 if (!new) 3347 return NULL; 3348 new->context_id = EFX_EF10_RSS_CONTEXT_INVALID; 3349 new->rx_hash_udp_4tuple = false; 3350 3351 /* Insert the new entry into the gap */ 3352 new->user_id = id; 3353 list_add_tail(&new->list, &ctx->list); 3354 return new; 3355 } 3356 3357 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) 3358 { 3359 struct list_head *head = &efx->rss_context.list; 3360 struct efx_rss_context *ctx; 3361 3362 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 3363 3364 list_for_each_entry(ctx, head, list) 3365 if (ctx->user_id == id) 3366 return ctx; 3367 return NULL; 3368 } 3369 3370 void efx_free_rss_context_entry(struct efx_rss_context *ctx) 3371 { 3372 list_del(&ctx->list); 3373 kfree(ctx); 3374 } 3375 3376 /************************************************************************** 3377 * 3378 * PCI interface 3379 * 3380 **************************************************************************/ 3381 3382 /* Main body of final NIC shutdown code 3383 * This is called only at module unload (or hotplug removal). 3384 */ 3385 static void efx_pci_remove_main(struct efx_nic *efx) 3386 { 3387 /* Flush reset_work. It can no longer be scheduled since we 3388 * are not READY. 3389 */ 3390 BUG_ON(efx->state == STATE_READY); 3391 cancel_work_sync(&efx->reset_work); 3392 3393 efx_disable_interrupts(efx); 3394 efx_clear_interrupt_affinity(efx); 3395 efx_nic_fini_interrupt(efx); 3396 efx_fini_port(efx); 3397 efx->type->fini(efx); 3398 efx_fini_napi(efx); 3399 efx_remove_all(efx); 3400 } 3401 3402 /* Final NIC shutdown 3403 * This is called only at module unload (or hotplug removal). A PF can call 3404 * this on its VFs to ensure they are unbound first. 3405 */ 3406 static void efx_pci_remove(struct pci_dev *pci_dev) 3407 { 3408 struct efx_nic *efx; 3409 3410 efx = pci_get_drvdata(pci_dev); 3411 if (!efx) 3412 return; 3413 3414 /* Mark the NIC as fini, then stop the interface */ 3415 rtnl_lock(); 3416 efx_dissociate(efx); 3417 dev_close(efx->net_dev); 3418 efx_disable_interrupts(efx); 3419 efx->state = STATE_UNINIT; 3420 rtnl_unlock(); 3421 3422 if (efx->type->sriov_fini) 3423 efx->type->sriov_fini(efx); 3424 3425 efx_unregister_netdev(efx); 3426 3427 efx_mtd_remove(efx); 3428 3429 efx_pci_remove_main(efx); 3430 3431 efx_fini_io(efx); 3432 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 3433 3434 efx_fini_struct(efx); 3435 free_netdev(efx->net_dev); 3436 3437 pci_disable_pcie_error_reporting(pci_dev); 3438 }; 3439 3440 /* NIC VPD information 3441 * Called during probe to display the part number of the 3442 * installed NIC. VPD is potentially very large but this should 3443 * always appear within the first 512 bytes. 
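 * Layout sketch of what is parsed below (per the PCI VPD format;
 * details are from the spec, not this driver): a large-resource
 * read-only tag, then keyword fields such as "PN" and "SN", each
 * preceded by a 3-byte header (2-byte keyword plus 1-byte length).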
3444 */ 3445 #define SFC_VPD_LEN 512 3446 static void efx_probe_vpd_strings(struct efx_nic *efx) 3447 { 3448 struct pci_dev *dev = efx->pci_dev; 3449 char vpd_data[SFC_VPD_LEN]; 3450 ssize_t vpd_size; 3451 int ro_start, ro_size, i, j; 3452 3453 /* Get the vpd data from the device */ 3454 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); 3455 if (vpd_size <= 0) { 3456 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); 3457 return; 3458 } 3459 3460 /* Get the Read only section */ 3461 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); 3462 if (ro_start < 0) { 3463 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); 3464 return; 3465 } 3466 3467 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); 3468 j = ro_size; 3469 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 3470 if (i + j > vpd_size) 3471 j = vpd_size - i; 3472 3473 /* Get the Part number */ 3474 i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); 3475 if (i < 0) { 3476 netif_err(efx, drv, efx->net_dev, "Part number not found\n"); 3477 return; 3478 } 3479 3480 j = pci_vpd_info_field_size(&vpd_data[i]); 3481 i += PCI_VPD_INFO_FLD_HDR_SIZE; 3482 if (i + j > vpd_size) { 3483 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); 3484 return; 3485 } 3486 3487 netif_info(efx, drv, efx->net_dev, 3488 "Part Number : %.*s\n", j, &vpd_data[i]); 3489 3490 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 3491 j = ro_size; 3492 i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); 3493 if (i < 0) { 3494 netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); 3495 return; 3496 } 3497 3498 j = pci_vpd_info_field_size(&vpd_data[i]); 3499 i += PCI_VPD_INFO_FLD_HDR_SIZE; 3500 if (i + j > vpd_size) { 3501 netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); 3502 return; 3503 } 3504 3505 efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); 3506 if (!efx->vpd_sn) 3507 return; 3508 3509 snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); 3510 } 3511 3512 3513 /* Main body of NIC initialisation 3514 * This is called at module load (or hotplug insertion, theoretically). 
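 * Start-of-day order below: efx_probe_all(), NAPI setup, NIC and
 * port initialisation, then interrupt hookup and enabling.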
3515 */ 3516 static int efx_pci_probe_main(struct efx_nic *efx) 3517 { 3518 int rc; 3519 3520 /* Do start-of-day initialisation */ 3521 rc = efx_probe_all(efx); 3522 if (rc) 3523 goto fail1; 3524 3525 efx_init_napi(efx); 3526 3527 down_write(&efx->filter_sem); 3528 rc = efx->type->init(efx); 3529 up_write(&efx->filter_sem); 3530 if (rc) { 3531 netif_err(efx, probe, efx->net_dev, 3532 "failed to initialise NIC\n"); 3533 goto fail3; 3534 } 3535 3536 rc = efx_init_port(efx); 3537 if (rc) { 3538 netif_err(efx, probe, efx->net_dev, 3539 "failed to initialise port\n"); 3540 goto fail4; 3541 } 3542 3543 rc = efx_nic_init_interrupt(efx); 3544 if (rc) 3545 goto fail5; 3546 3547 efx_set_interrupt_affinity(efx); 3548 rc = efx_enable_interrupts(efx); 3549 if (rc) 3550 goto fail6; 3551 3552 return 0; 3553 3554 fail6: 3555 efx_clear_interrupt_affinity(efx); 3556 efx_nic_fini_interrupt(efx); 3557 fail5: 3558 efx_fini_port(efx); 3559 fail4: 3560 efx->type->fini(efx); 3561 fail3: 3562 efx_fini_napi(efx); 3563 efx_remove_all(efx); 3564 fail1: 3565 return rc; 3566 } 3567 3568 static int efx_pci_probe_post_io(struct efx_nic *efx) 3569 { 3570 struct net_device *net_dev = efx->net_dev; 3571 int rc = efx_pci_probe_main(efx); 3572 3573 if (rc) 3574 return rc; 3575 3576 if (efx->type->sriov_init) { 3577 rc = efx->type->sriov_init(efx); 3578 if (rc) 3579 netif_err(efx, probe, efx->net_dev, 3580 "SR-IOV can't be enabled rc %d\n", rc); 3581 } 3582 3583 /* Determine netdevice features */ 3584 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | 3585 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL); 3586 if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) 3587 net_dev->features |= NETIF_F_TSO6; 3588 /* Check whether device supports TSO */ 3589 if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) 3590 net_dev->features &= ~NETIF_F_ALL_TSO; 3591 /* Mask for features that also apply to VLAN devices */ 3592 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | 3593 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | 3594 NETIF_F_RXCSUM); 3595 3596 net_dev->hw_features |= net_dev->features & ~efx->fixed_features; 3597 3598 /* Disable receiving frames with bad FCS, by default. */ 3599 net_dev->features &= ~NETIF_F_RXALL; 3600 3601 /* Disable VLAN filtering by default. It may be enforced if 3602 * the feature is fixed (i.e. VLAN filters are required to 3603 * receive VLAN tagged packets due to vPort restrictions). 3604 */ 3605 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 3606 net_dev->features |= efx->fixed_features; 3607 3608 rc = efx_register_netdev(efx); 3609 if (!rc) 3610 return 0; 3611 3612 efx_pci_remove_main(efx); 3613 return rc; 3614 } 3615 3616 /* NIC initialisation 3617 * 3618 * This is called at module load (or hotplug insertion, 3619 * theoretically). It sets up PCI mappings, resets the NIC, 3620 * sets up and registers the network devices with the kernel and hooks 3621 * the interrupt service routine. It does not prepare the device for 3622 * transmission; this is left to the first time one of the network 3623 * interfaces is brought up (i.e. efx_net_open). 
3624 */ 3625 static int efx_pci_probe(struct pci_dev *pci_dev, 3626 const struct pci_device_id *entry) 3627 { 3628 struct net_device *net_dev; 3629 struct efx_nic *efx; 3630 int rc; 3631 3632 /* Allocate and initialise a struct net_device and struct efx_nic */ 3633 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, 3634 EFX_MAX_RX_QUEUES); 3635 if (!net_dev) 3636 return -ENOMEM; 3637 efx = netdev_priv(net_dev); 3638 efx->type = (const struct efx_nic_type *) entry->driver_data; 3639 efx->fixed_features |= NETIF_F_HIGHDMA; 3640 3641 pci_set_drvdata(pci_dev, efx); 3642 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 3643 rc = efx_init_struct(efx, pci_dev, net_dev); 3644 if (rc) 3645 goto fail1; 3646 3647 netif_info(efx, probe, efx->net_dev, 3648 "Solarflare NIC detected\n"); 3649 3650 if (!efx->type->is_vf) 3651 efx_probe_vpd_strings(efx); 3652 3653 /* Set up basic I/O (BAR mappings etc) */ 3654 rc = efx_init_io(efx); 3655 if (rc) 3656 goto fail2; 3657 3658 rc = efx_pci_probe_post_io(efx); 3659 if (rc) { 3660 /* On failure, retry once immediately. 3661 * If we aborted probe due to a scheduled reset, dismiss it. 3662 */ 3663 efx->reset_pending = 0; 3664 rc = efx_pci_probe_post_io(efx); 3665 if (rc) { 3666 /* On another failure, retry once more 3667 * after a 50-305ms delay. 3668 */ 3669 unsigned char r; 3670 3671 get_random_bytes(&r, 1); 3672 msleep((unsigned int)r + 50); 3673 efx->reset_pending = 0; 3674 rc = efx_pci_probe_post_io(efx); 3675 } 3676 } 3677 if (rc) 3678 goto fail3; 3679 3680 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 3681 3682 /* Try to create MTDs, but allow this to fail */ 3683 rtnl_lock(); 3684 rc = efx_mtd_probe(efx); 3685 rtnl_unlock(); 3686 if (rc && rc != -EPERM) 3687 netif_warn(efx, probe, efx->net_dev, 3688 "failed to create MTDs (%d)\n", rc); 3689 3690 (void)pci_enable_pcie_error_reporting(pci_dev); 3691 3692 if (efx->type->udp_tnl_push_ports) 3693 efx->type->udp_tnl_push_ports(efx); 3694 3695 return 0; 3696 3697 fail3: 3698 efx_fini_io(efx); 3699 fail2: 3700 efx_fini_struct(efx); 3701 fail1: 3702 WARN_ON(rc > 0); 3703 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); 3704 free_netdev(net_dev); 3705 return rc; 3706 } 3707 3708 /* efx_pci_sriov_configure returns the actual number of Virtual Functions 3709 * enabled on success 3710 */ 3711 #ifdef CONFIG_SFC_SRIOV 3712 static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) 3713 { 3714 int rc; 3715 struct efx_nic *efx = pci_get_drvdata(dev); 3716 3717 if (efx->type->sriov_configure) { 3718 rc = efx->type->sriov_configure(efx, num_vfs); 3719 if (rc) 3720 return rc; 3721 else 3722 return num_vfs; 3723 } else 3724 return -EOPNOTSUPP; 3725 } 3726 #endif 3727 3728 static int efx_pm_freeze(struct device *dev) 3729 { 3730 struct efx_nic *efx = dev_get_drvdata(dev); 3731 3732 rtnl_lock(); 3733 3734 if (efx->state != STATE_DISABLED) { 3735 efx->state = STATE_UNINIT; 3736 3737 efx_device_detach_sync(efx); 3738 3739 efx_stop_all(efx); 3740 efx_disable_interrupts(efx); 3741 } 3742 3743 rtnl_unlock(); 3744 3745 return 0; 3746 } 3747 3748 static int efx_pm_thaw(struct device *dev) 3749 { 3750 int rc; 3751 struct efx_nic *efx = dev_get_drvdata(dev); 3752 3753 rtnl_lock(); 3754 3755 if (efx->state != STATE_DISABLED) { 3756 rc = efx_enable_interrupts(efx); 3757 if (rc) 3758 goto fail; 3759 3760 mutex_lock(&efx->mac_lock); 3761 efx->phy_op->reconfigure(efx); 3762 mutex_unlock(&efx->mac_lock); 3763 3764 efx_start_all(efx); 3765 3766 efx_device_attach_if_not_resetting(efx); 3767 3768 efx->state = STATE_READY; 3769 3770 efx->type->resume_wol(efx); 3771 } 3772 3773 rtnl_unlock(); 3774 3775 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 3776 queue_work(reset_workqueue, &efx->reset_work); 3777 3778 return 0; 3779 3780 fail: 3781 rtnl_unlock(); 3782 3783 return rc; 3784 } 3785 3786 static int efx_pm_poweroff(struct device *dev) 3787 { 3788 struct pci_dev *pci_dev = to_pci_dev(dev); 3789 struct efx_nic *efx = pci_get_drvdata(pci_dev); 3790 3791 efx->type->fini(efx); 3792 3793 efx->reset_pending = 0; 3794 3795 pci_save_state(pci_dev); 3796 return pci_set_power_state(pci_dev, PCI_D3hot); 3797 } 3798 3799 /* Used for both resume and restore */ 3800 static int efx_pm_resume(struct device *dev) 3801 { 3802 struct pci_dev *pci_dev = to_pci_dev(dev); 3803 struct efx_nic *efx = pci_get_drvdata(pci_dev); 3804 int rc; 3805 3806 rc = pci_set_power_state(pci_dev, PCI_D0); 3807 if (rc) 3808 return rc; 3809 pci_restore_state(pci_dev); 3810 rc = pci_enable_device(pci_dev); 3811 if (rc) 3812 return rc; 3813 pci_set_master(efx->pci_dev); 3814 rc = efx->type->reset(efx, RESET_TYPE_ALL); 3815 if (rc) 3816 return rc; 3817 down_write(&efx->filter_sem); 3818 rc = efx->type->init(efx); 3819 up_write(&efx->filter_sem); 3820 if (rc) 3821 return rc; 3822 rc = efx_pm_thaw(dev); 3823 return rc; 3824 } 3825 3826 static int efx_pm_suspend(struct device *dev) 3827 { 3828 int rc; 3829 3830 efx_pm_freeze(dev); 3831 rc = efx_pm_poweroff(dev); 3832 if (rc) 3833 efx_pm_resume(dev); 3834 return rc; 3835 } 3836 3837 static const struct dev_pm_ops efx_pm_ops = { 3838 .suspend = efx_pm_suspend, 3839 .resume = efx_pm_resume, 3840 .freeze = efx_pm_freeze, 3841 .thaw = efx_pm_thaw, 3842 .poweroff = efx_pm_poweroff, 3843 .restore = efx_pm_resume, 3844 }; 3845 3846 /* A PCI error affecting this device was detected. 3847 * At this point MMIO and DMA may be disabled. 3848 * Stop the software path and request a slot reset. 
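 * The AER/EEH core then calls .slot_reset and .resume from the
 * efx_err_handlers table below to complete recovery.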
3849 */ 3850 static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, 3851 enum pci_channel_state state) 3852 { 3853 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3854 struct efx_nic *efx = pci_get_drvdata(pdev); 3855 3856 if (state == pci_channel_io_perm_failure) 3857 return PCI_ERS_RESULT_DISCONNECT; 3858 3859 rtnl_lock(); 3860 3861 if (efx->state != STATE_DISABLED) { 3862 efx->state = STATE_RECOVERY; 3863 efx->reset_pending = 0; 3864 3865 efx_device_detach_sync(efx); 3866 3867 efx_stop_all(efx); 3868 efx_disable_interrupts(efx); 3869 3870 status = PCI_ERS_RESULT_NEED_RESET; 3871 } else { 3872 /* If the interface is disabled we don't want to do anything 3873 * with it. 3874 */ 3875 status = PCI_ERS_RESULT_RECOVERED; 3876 } 3877 3878 rtnl_unlock(); 3879 3880 pci_disable_device(pdev); 3881 3882 return status; 3883 } 3884 3885 /* Fake a successful reset, which will be performed later in efx_io_resume. */ 3886 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) 3887 { 3888 struct efx_nic *efx = pci_get_drvdata(pdev); 3889 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3890 3891 if (pci_enable_device(pdev)) { 3892 netif_err(efx, hw, efx->net_dev, 3893 "Cannot re-enable PCI device after reset.\n"); 3894 status = PCI_ERS_RESULT_DISCONNECT; 3895 } 3896 3897 return status; 3898 } 3899 3900 /* Perform the actual reset and resume I/O operations. */ 3901 static void efx_io_resume(struct pci_dev *pdev) 3902 { 3903 struct efx_nic *efx = pci_get_drvdata(pdev); 3904 int rc; 3905 3906 rtnl_lock(); 3907 3908 if (efx->state == STATE_DISABLED) 3909 goto out; 3910 3911 rc = efx_reset(efx, RESET_TYPE_ALL); 3912 if (rc) { 3913 netif_err(efx, hw, efx->net_dev, 3914 "efx_reset failed after PCI error (%d)\n", rc); 3915 } else { 3916 efx->state = STATE_READY; 3917 netif_dbg(efx, hw, efx->net_dev, 3918 "Done resetting and resuming IO after PCI error.\n"); 3919 } 3920 3921 out: 3922 rtnl_unlock(); 3923 } 3924 3925 /* For simplicity and reliability, we always require a slot reset and try to 3926 * reset the hardware when a pci error affecting the device is detected. 3927 * We leave both the link_reset and mmio_enabled callback unimplemented: 3928 * with our request for slot reset the mmio_enabled callback will never be 3929 * called, and the link_reset callback is not used by AER or EEH mechanisms. 
3930 */ 3931 static const struct pci_error_handlers efx_err_handlers = { 3932 .error_detected = efx_io_error_detected, 3933 .slot_reset = efx_io_slot_reset, 3934 .resume = efx_io_resume, 3935 }; 3936 3937 static struct pci_driver efx_pci_driver = { 3938 .name = KBUILD_MODNAME, 3939 .id_table = efx_pci_table, 3940 .probe = efx_pci_probe, 3941 .remove = efx_pci_remove, 3942 .driver.pm = &efx_pm_ops, 3943 .err_handler = &efx_err_handlers, 3944 #ifdef CONFIG_SFC_SRIOV 3945 .sriov_configure = efx_pci_sriov_configure, 3946 #endif 3947 }; 3948 3949 /************************************************************************** 3950 * 3951 * Kernel module interface 3952 * 3953 *************************************************************************/ 3954 3955 module_param(interrupt_mode, uint, 0444); 3956 MODULE_PARM_DESC(interrupt_mode, 3957 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 3958 3959 static int __init efx_init_module(void) 3960 { 3961 int rc; 3962 3963 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); 3964 3965 rc = register_netdevice_notifier(&efx_netdev_notifier); 3966 if (rc) 3967 goto err_notifier; 3968 3969 #ifdef CONFIG_SFC_SRIOV 3970 rc = efx_init_sriov(); 3971 if (rc) 3972 goto err_sriov; 3973 #endif 3974 3975 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 3976 if (!reset_workqueue) { 3977 rc = -ENOMEM; 3978 goto err_reset; 3979 } 3980 3981 rc = pci_register_driver(&efx_pci_driver); 3982 if (rc < 0) 3983 goto err_pci; 3984 3985 return 0; 3986 3987 err_pci: 3988 destroy_workqueue(reset_workqueue); 3989 err_reset: 3990 #ifdef CONFIG_SFC_SRIOV 3991 efx_fini_sriov(); 3992 err_sriov: 3993 #endif 3994 unregister_netdevice_notifier(&efx_netdev_notifier); 3995 err_notifier: 3996 return rc; 3997 } 3998 3999 static void __exit efx_exit_module(void) 4000 { 4001 printk(KERN_INFO "Solarflare NET driver unloading\n"); 4002 4003 pci_unregister_driver(&efx_pci_driver); 4004 destroy_workqueue(reset_workqueue); 4005 #ifdef CONFIG_SFC_SRIOV 4006 efx_fini_sriov(); 4007 #endif 4008 unregister_netdevice_notifier(&efx_netdev_notifier); 4009 4010 } 4011 4012 module_init(efx_init_module); 4013 module_exit(efx_exit_module); 4014 4015 MODULE_AUTHOR("Solarflare Communications and " 4016 "Michael Brown <mbrown@fensystems.co.uk>"); 4017 MODULE_DESCRIPTION("Solarflare network driver"); 4018 MODULE_LICENSE("GPL"); 4019 MODULE_DEVICE_TABLE(pci, efx_pci_table); 4020 MODULE_VERSION(EFX_DRIVER_VERSION); 4021
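/* Usage note (illustrative, not part of the driver): the module
 * parameters registered above can be supplied at load time, e.g.
 *
 *	modprobe sfc interrupt_mode=1 efx_separate_tx_channels=Y
 *
 * which requests MSI interrupts and dedicated TX channels.
 */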