// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int efx_siena_rss_cpus;

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
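/* The value 64 matches the kernel's long-standing default NAPI_POLL_WEIGHT,
 * i.e. the maximum number of packets/events one NAPI poll may process
 * before the handler must return and yield the CPU.
 */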
static int napi_weight = 64;

static const struct efx_channel_type efx_default_channel_type;

/***************
 * INTERRUPTS
 ***************/

static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node)
		cpumask_and(filter_mask, filter_mask,
			    cpumask_of_pcibus(efx->pci_dev->bus));

	count = 0;
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (efx_siena_rss_cpus) {
		count = efx_siena_rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in local node, fall back to any online CPU */
		if (count == 0)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
			       warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs.
	 */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_siena_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
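
	/* Illustrative sizing (assumed numbers, not the driver's actual
	 * limits): if an event queue held 4096 entries and a maximally-sized
	 * TX queue 1024, tx_per_ev would be 4 (before the
	 * EFX_MAX_TXQ_PER_CHANNEL cap), so a machine with 32 possible CPUs
	 * would need n_xdp_ev = DIV_ROUND_UP(32, 4) = 8 event queues to give
	 * every CPU its own XDP TX queue.
	 */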

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease the device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease the device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);

		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_siena_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}
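
/* The resulting channel numbering is: RX (and combined RX/TX) channels
 * first, then any separate TX channels, then XDP channels starting at
 * xdp_channel_offset. Extra channel types (e.g. PTP) are later assigned
 * the highest-numbered non-XDP channels by efx_siena_probe_interrupts().
 */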

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_siena_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}
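
/* The affinity hints below spread channel IRQs round-robin over the online
 * CPUs of the device's NUMA node, wrapping when there are more channels
 * than local CPUs. These are only hints; irqbalance or the administrator
 * may still steer the IRQs elsewhere.
 */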
#if defined(CONFIG_SMP)
void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
{
	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in local node, fall back to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;

	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}

void
efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}
#endif /* CONFIG_SMP */

void efx_siena_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/***************
 * EVENT QUEUES
 ***************/

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
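	/* For instance (illustrative numbers only): with 512-entry RX and TX
	 * queues, 512 + 512 + 128 = 1152 is rounded up to a 2048-entry event
	 * queue, and eventq_mask becomes 2047 (assuming EFX_MIN_EVQ_SIZE
	 * does not exceed 2048).
	 */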
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_siena_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_siena_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/
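
/* The delayed work below ages out accelerated-RFS filters. The quota is
 * proportional to elapsed time, so that a full filter table's worth of
 * expiry checks is earned roughly every 30 seconds without doing too much
 * work in any single scan.
 */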
#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
					min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);

	return channel;
}

int efx_siena_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_siena_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_siena_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}
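
/* Note that efx_copy_channel() below deliberately uses kmalloc() plus a
 * whole-struct assignment: every parameter is inherited from the old
 * channel, and only the fields tied to live resources (NAPI state, event
 * queue and descriptor buffers) are cleared so they can be re-probed.
 */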
/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_siena_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_siena_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_siena_remove_channel(channel);
	return rc;
}

static void efx_get_channel_name(struct efx_channel *channel, char *buf,
				 size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

void efx_siena_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_siena_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_siena_set_channel_names(efx);

	return 0;

fail:
	efx_siena_remove_channels(efx);
	return rc;
}

void efx_siena_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_siena_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_siena_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_siena_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_siena_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
				struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev,
		  "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
	return 0;
}
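
/* Map XDP TX queues onto channels according to xdp_txq_queues_mode:
 * DEDICATED means every possible CPU got its own queue on an XDP channel,
 * SHARED means XDP channels exist but each queue serves several CPUs, and
 * BORROWED means no XDP channels fitted, so XDP reuses the net-stack TX
 * queues.
 */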
static void efx_set_xdp_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number = 0;
	int rc;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->tx_channel_offset)
			continue;

		if (efx_channel_is_xdp_tx(channel)) {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		} else {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				netif_dbg(efx, drv, efx->net_dev,
					  "Channel %u TXQ %u is HW %u\n",
					  channel->channel, tx_queue->label,
					  tx_queue->queue);
			}

			/* If XDP is borrowing queues from net stack, it must
			 * use the queue with no csum offload, which is the
			 * first one of the channel
			 * (note: tx_queue_by_type is not initialized yet)
			 */
			if (efx->xdp_txq_queues_mode ==
			    EFX_XDP_TX_QUEUES_BORROWED) {
				tx_queue = &channel->tx_queue[0];
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, the remaining
	 * CPUs share the queues that already exist.
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}
}

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi_channel(struct efx_channel *channel);
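
/* Channel reallocation strategy: clone every channel that supports it,
 * swap the clones in, re-probe them with the new ring sizes, and swap
 * back on failure. Channels without a ->copy() method keep their existing
 * buffer-table entries untouched.
 */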
int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
			       u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_siena_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	efx_set_xdp_channels(efx);
out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_siena_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_siena_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	goto out;
}

int efx_siena_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx->tx_channel_offset =
		efx_siena_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;
	}

	efx_set_xdp_channels(efx);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

/***************
 * START/STOP
 ***************/

static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_siena_start_eventq(channel);
	}

	efx_siena_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_siena_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_siena_mcdi_flush_async(efx);
}
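
/* Channels with ->keep_eventq set have their event queues initialised once
 * in efx_siena_enable_interrupts() and preserved across
 * efx_soft_{enable,disable}_interrupts(), while all other channels
 * re-create theirs on each soft enable.
 */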
int efx_siena_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_siena_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

void efx_siena_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel_rev(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_siena_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_siena_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_siena_stop_eventq(channel);
			efx_siena_fast_push_rx_descriptors(rx_queue, false);
			efx_siena_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_siena_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_siena_stop_eventq(channel);
			efx_siena_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_siena_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_siena_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/
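
/* Note that efx_process_channel() batches work within one poll: received
 * SKBs are queued on channel->rx_list and delivered with a single
 * netif_receive_skb_list() call, and TX completions are reported to BQL
 * once per queue via netdev_tx_completed_queue(), keeping per-packet
 * overhead down.
 */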
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_siena_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
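
/* Adaptive IRQ moderation: efx_poll() calls efx_update_irq_mod() once per
 * 1000 interrupts on a channel; the accumulated irq_mod_score is then
 * compared against the irq_adapt_{low,high}_thresh module parameters to
 * step the moderation interval down or up by irq_mod_step_us.
 */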
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll,
			      napi_weight);
}

void efx_siena_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_siena_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/***************
 * Housekeeping
 ***************/

static int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_siena_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};