1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2018 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include "net_driver.h" 12 #include <linux/module.h> 13 #include <linux/iommu.h> 14 #include "efx.h" 15 #include "nic.h" 16 #include "rx_common.h" 17 18 /* This is the percentage fill level below which new RX descriptors 19 * will be added to the RX descriptor ring. 20 */ 21 static unsigned int rx_refill_threshold; 22 module_param(rx_refill_threshold, uint, 0444); 23 MODULE_PARM_DESC(rx_refill_threshold, 24 "RX descriptor ring refill threshold (%)"); 25 26 /* RX maximum head room required. 27 * 28 * This must be at least 1 to prevent overflow, plus one packet-worth 29 * to allow pipelined receives. 30 */ 31 #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS) 32 33 /* Check the RX page recycle ring for a page that can be reused. */ 34 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) 35 { 36 struct efx_nic *efx = rx_queue->efx; 37 struct efx_rx_page_state *state; 38 unsigned int index; 39 struct page *page; 40 41 if (unlikely(!rx_queue->page_ring)) 42 return NULL; 43 index = rx_queue->page_remove & rx_queue->page_ptr_mask; 44 page = rx_queue->page_ring[index]; 45 if (page == NULL) 46 return NULL; 47 48 rx_queue->page_ring[index] = NULL; 49 /* page_remove cannot exceed page_add. */ 50 if (rx_queue->page_remove != rx_queue->page_add) 51 ++rx_queue->page_remove; 52 53 /* If page_count is 1 then we hold the only reference to this page. */ 54 if (page_count(page) == 1) { 55 ++rx_queue->page_recycle_count; 56 return page; 57 } else { 58 state = page_address(page); 59 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, 60 PAGE_SIZE << efx->rx_buffer_order, 61 DMA_FROM_DEVICE); 62 put_page(page); 63 ++rx_queue->page_recycle_failed; 64 } 65 66 return NULL; 67 } 68 69 /* Attempt to recycle the page if there is an RX recycle ring; the page can 70 * only be added if this is the final RX buffer, to prevent pages being used in 71 * the descriptor ring and appearing in the recycle ring simultaneously. 72 */ 73 static void efx_recycle_rx_page(struct efx_channel *channel, 74 struct efx_rx_buffer *rx_buf) 75 { 76 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 77 struct efx_nic *efx = rx_queue->efx; 78 struct page *page = rx_buf->page; 79 unsigned int index; 80 81 /* Only recycle the page after processing the final buffer. */ 82 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) 83 return; 84 85 index = rx_queue->page_add & rx_queue->page_ptr_mask; 86 if (rx_queue->page_ring[index] == NULL) { 87 unsigned int read_index = rx_queue->page_remove & 88 rx_queue->page_ptr_mask; 89 90 /* The next slot in the recycle ring is available, but 91 * increment page_remove if the read pointer currently 92 * points here. 93 */ 94 if (read_index == index) 95 ++rx_queue->page_remove; 96 rx_queue->page_ring[index] = page; 97 ++rx_queue->page_add; 98 return; 99 } 100 ++rx_queue->page_recycle_full; 101 efx_unmap_rx_buffer(efx, rx_buf); 102 put_page(rx_buf->page); 103 } 104 105 /* Recycle the pages that are used by buffers that have just been received. */ 106 void efx_recycle_rx_pages(struct efx_channel *channel, 107 struct efx_rx_buffer *rx_buf, 108 unsigned int n_frags) 109 { 110 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 111 112 if (unlikely(!rx_queue->page_ring)) 113 return; 114 115 do { 116 efx_recycle_rx_page(channel, rx_buf); 117 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 118 } while (--n_frags); 119 } 120 121 void efx_discard_rx_packet(struct efx_channel *channel, 122 struct efx_rx_buffer *rx_buf, 123 unsigned int n_frags) 124 { 125 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 126 127 efx_recycle_rx_pages(channel, rx_buf, n_frags); 128 129 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 130 } 131 132 static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) 133 { 134 unsigned int bufs_in_recycle_ring, page_ring_size; 135 struct efx_nic *efx = rx_queue->efx; 136 137 bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); 138 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / 139 efx->rx_bufs_per_page); 140 rx_queue->page_ring = kcalloc(page_ring_size, 141 sizeof(*rx_queue->page_ring), GFP_KERNEL); 142 if (!rx_queue->page_ring) 143 rx_queue->page_ptr_mask = 0; 144 else 145 rx_queue->page_ptr_mask = page_ring_size - 1; 146 } 147 148 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) 149 { 150 struct efx_nic *efx = rx_queue->efx; 151 int i; 152 153 /* Unmap and release the pages in the recycle ring. Remove the ring. */ 154 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { 155 struct page *page = rx_queue->page_ring[i]; 156 struct efx_rx_page_state *state; 157 158 if (page == NULL) 159 continue; 160 161 state = page_address(page); 162 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, 163 PAGE_SIZE << efx->rx_buffer_order, 164 DMA_FROM_DEVICE); 165 put_page(page); 166 } 167 kfree(rx_queue->page_ring); 168 rx_queue->page_ring = NULL; 169 } 170 171 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 172 struct efx_rx_buffer *rx_buf) 173 { 174 /* Release the page reference we hold for the buffer. */ 175 if (rx_buf->page) 176 put_page(rx_buf->page); 177 178 /* If this is the last buffer in a page, unmap and free it. */ 179 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { 180 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 181 efx_free_rx_buffers(rx_queue, rx_buf, 1); 182 } 183 rx_buf->page = NULL; 184 } 185 186 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) 187 { 188 struct efx_nic *efx = rx_queue->efx; 189 unsigned int entries; 190 int rc; 191 192 /* Create the smallest power-of-two aligned ring */ 193 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); 194 EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); 195 rx_queue->ptr_mask = entries - 1; 196 197 netif_dbg(efx, probe, efx->net_dev, 198 "creating RX queue %d size %#x mask %#x\n", 199 efx_rx_queue_index(rx_queue), efx->rxq_entries, 200 rx_queue->ptr_mask); 201 202 /* Allocate RX buffers */ 203 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), 204 GFP_KERNEL); 205 if (!rx_queue->buffer) 206 return -ENOMEM; 207 208 rc = efx_nic_probe_rx(rx_queue); 209 if (rc) { 210 kfree(rx_queue->buffer); 211 rx_queue->buffer = NULL; 212 } 213 214 return rc; 215 } 216 217 void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 218 { 219 unsigned int max_fill, trigger, max_trigger; 220 struct efx_nic *efx = rx_queue->efx; 221 int rc = 0; 222 223 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 224 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); 225 226 /* Initialise ptr fields */ 227 rx_queue->added_count = 0; 228 rx_queue->notified_count = 0; 229 rx_queue->removed_count = 0; 230 rx_queue->min_fill = -1U; 231 efx_init_rx_recycle_ring(rx_queue); 232 233 rx_queue->page_remove = 0; 234 rx_queue->page_add = rx_queue->page_ptr_mask + 1; 235 rx_queue->page_recycle_count = 0; 236 rx_queue->page_recycle_failed = 0; 237 rx_queue->page_recycle_full = 0; 238 239 /* Initialise limit fields */ 240 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; 241 max_trigger = 242 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; 243 if (rx_refill_threshold != 0) { 244 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 245 if (trigger > max_trigger) 246 trigger = max_trigger; 247 } else { 248 trigger = max_trigger; 249 } 250 251 rx_queue->max_fill = max_fill; 252 rx_queue->fast_fill_trigger = trigger; 253 rx_queue->refill_enabled = true; 254 255 /* Initialise XDP queue information */ 256 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, 257 rx_queue->core_index, 0); 258 259 if (rc) { 260 netif_err(efx, rx_err, efx->net_dev, 261 "Failure to initialise XDP queue information rc=%d\n", 262 rc); 263 efx->xdp_rxq_info_failed = true; 264 } else { 265 rx_queue->xdp_rxq_info_valid = true; 266 } 267 268 /* Set up RX descriptor ring */ 269 efx_nic_init_rx(rx_queue); 270 } 271 272 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 273 { 274 struct efx_rx_buffer *rx_buf; 275 int i; 276 277 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 278 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 279 280 del_timer_sync(&rx_queue->slow_fill); 281 282 /* Release RX buffers from the current read ptr to the write ptr */ 283 if (rx_queue->buffer) { 284 for (i = rx_queue->removed_count; i < rx_queue->added_count; 285 i++) { 286 unsigned int index = i & rx_queue->ptr_mask; 287 288 rx_buf = efx_rx_buffer(rx_queue, index); 289 efx_fini_rx_buffer(rx_queue, rx_buf); 290 } 291 } 292 293 efx_fini_rx_recycle_ring(rx_queue); 294 295 if (rx_queue->xdp_rxq_info_valid) 296 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); 297 298 rx_queue->xdp_rxq_info_valid = false; 299 } 300 301 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 302 { 303 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 304 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); 305 306 efx_nic_remove_rx(rx_queue); 307 308 kfree(rx_queue->buffer); 309 rx_queue->buffer = NULL; 310 } 311 312 /* Unmap a DMA-mapped page. This function is only called for the final RX 313 * buffer in a page. 314 */ 315 void efx_unmap_rx_buffer(struct efx_nic *efx, 316 struct efx_rx_buffer *rx_buf) 317 { 318 struct page *page = rx_buf->page; 319 320 if (page) { 321 struct efx_rx_page_state *state = page_address(page); 322 323 dma_unmap_page(&efx->pci_dev->dev, 324 state->dma_addr, 325 PAGE_SIZE << efx->rx_buffer_order, 326 DMA_FROM_DEVICE); 327 } 328 } 329 330 void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, 331 struct efx_rx_buffer *rx_buf, 332 unsigned int num_bufs) 333 { 334 do { 335 if (rx_buf->page) { 336 put_page(rx_buf->page); 337 rx_buf->page = NULL; 338 } 339 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); 340 } while (--num_bufs); 341 } 342 343 void efx_rx_slow_fill(struct timer_list *t) 344 { 345 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); 346 347 /* Post an event to cause NAPI to run and refill the queue */ 348 efx_nic_generate_fill_event(rx_queue); 349 ++rx_queue->slow_fill_count; 350 } 351 352 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) 353 { 354 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); 355 } 356 357 /* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers 358 * 359 * @rx_queue: Efx RX queue 360 * 361 * This allocates a batch of pages, maps them for DMA, and populates 362 * struct efx_rx_buffers for each one. Return a negative error code or 363 * 0 on success. If a single page can be used for multiple buffers, 364 * then the page will either be inserted fully, or not at all. 365 */ 366 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) 367 { 368 unsigned int page_offset, index, count; 369 struct efx_nic *efx = rx_queue->efx; 370 struct efx_rx_page_state *state; 371 struct efx_rx_buffer *rx_buf; 372 dma_addr_t dma_addr; 373 struct page *page; 374 375 count = 0; 376 do { 377 page = efx_reuse_page(rx_queue); 378 if (page == NULL) { 379 page = alloc_pages(__GFP_COMP | 380 (atomic ? GFP_ATOMIC : GFP_KERNEL), 381 efx->rx_buffer_order); 382 if (unlikely(page == NULL)) 383 return -ENOMEM; 384 dma_addr = 385 dma_map_page(&efx->pci_dev->dev, page, 0, 386 PAGE_SIZE << efx->rx_buffer_order, 387 DMA_FROM_DEVICE); 388 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, 389 dma_addr))) { 390 __free_pages(page, efx->rx_buffer_order); 391 return -EIO; 392 } 393 state = page_address(page); 394 state->dma_addr = dma_addr; 395 } else { 396 state = page_address(page); 397 dma_addr = state->dma_addr; 398 } 399 400 dma_addr += sizeof(struct efx_rx_page_state); 401 page_offset = sizeof(struct efx_rx_page_state); 402 403 do { 404 index = rx_queue->added_count & rx_queue->ptr_mask; 405 rx_buf = efx_rx_buffer(rx_queue, index); 406 rx_buf->dma_addr = dma_addr + efx->rx_ip_align + 407 EFX_XDP_HEADROOM; 408 rx_buf->page = page; 409 rx_buf->page_offset = page_offset + efx->rx_ip_align + 410 EFX_XDP_HEADROOM; 411 rx_buf->len = efx->rx_dma_len; 412 rx_buf->flags = 0; 413 ++rx_queue->added_count; 414 get_page(page); 415 dma_addr += efx->rx_page_buf_step; 416 page_offset += efx->rx_page_buf_step; 417 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); 418 419 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; 420 } while (++count < efx->rx_pages_per_batch); 421 422 return 0; 423 } 424 425 void efx_rx_config_page_split(struct efx_nic *efx) 426 { 427 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + 428 EFX_XDP_HEADROOM + EFX_XDP_TAILROOM, 429 EFX_RX_BUF_ALIGNMENT); 430 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : 431 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / 432 efx->rx_page_buf_step); 433 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / 434 efx->rx_bufs_per_page; 435 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, 436 efx->rx_bufs_per_page); 437 } 438 439 /* efx_fast_push_rx_descriptors - push new RX descriptors quickly 440 * @rx_queue: RX descriptor queue 441 * 442 * This will aim to fill the RX descriptor queue up to 443 * @rx_queue->@max_fill. If there is insufficient atomic 444 * memory to do so, a slow fill will be scheduled. 445 * 446 * The caller must provide serialisation (none is used here). In practise, 447 * this means this function must run from the NAPI handler, or be called 448 * when NAPI is disabled. 449 */ 450 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) 451 { 452 struct efx_nic *efx = rx_queue->efx; 453 unsigned int fill_level, batch_size; 454 int space, rc = 0; 455 456 if (!rx_queue->refill_enabled) 457 return; 458 459 /* Calculate current fill level, and exit if we don't need to fill */ 460 fill_level = (rx_queue->added_count - rx_queue->removed_count); 461 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); 462 if (fill_level >= rx_queue->fast_fill_trigger) 463 goto out; 464 465 /* Record minimum fill level */ 466 if (unlikely(fill_level < rx_queue->min_fill)) { 467 if (fill_level) 468 rx_queue->min_fill = fill_level; 469 } 470 471 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; 472 space = rx_queue->max_fill - fill_level; 473 EFX_WARN_ON_ONCE_PARANOID(space < batch_size); 474 475 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 476 "RX queue %d fast-filling descriptor ring from" 477 " level %d to level %d\n", 478 efx_rx_queue_index(rx_queue), fill_level, 479 rx_queue->max_fill); 480 481 do { 482 rc = efx_init_rx_buffers(rx_queue, atomic); 483 if (unlikely(rc)) { 484 /* Ensure that we don't leave the rx queue empty */ 485 efx_schedule_slow_fill(rx_queue); 486 goto out; 487 } 488 } while ((space -= batch_size) >= batch_size); 489 490 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 491 "RX queue %d fast-filled descriptor ring " 492 "to level %d\n", efx_rx_queue_index(rx_queue), 493 rx_queue->added_count - rx_queue->removed_count); 494 495 out: 496 if (rx_queue->notified_count != rx_queue->added_count) 497 efx_nic_notify_rx_desc(rx_queue); 498 } 499 500 /* Pass a received packet up through GRO. GRO can handle pages 501 * regardless of checksum state and skbs with a good checksum. 502 */ 503 void 504 efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, 505 unsigned int n_frags, u8 *eh, __wsum csum) 506 { 507 struct napi_struct *napi = &channel->napi_str; 508 struct efx_nic *efx = channel->efx; 509 struct sk_buff *skb; 510 511 skb = napi_get_frags(napi); 512 if (unlikely(!skb)) { 513 struct efx_rx_queue *rx_queue; 514 515 rx_queue = efx_channel_get_rx_queue(channel); 516 efx_free_rx_buffers(rx_queue, rx_buf, n_frags); 517 return; 518 } 519 520 if (efx->net_dev->features & NETIF_F_RXHASH && 521 efx_rx_buf_hash_valid(efx, eh)) 522 skb_set_hash(skb, efx_rx_buf_hash(efx, eh), 523 PKT_HASH_TYPE_L3); 524 if (csum) { 525 skb->csum = csum; 526 skb->ip_summed = CHECKSUM_COMPLETE; 527 } else { 528 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 529 CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 530 } 531 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); 532 533 for (;;) { 534 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 535 rx_buf->page, rx_buf->page_offset, 536 rx_buf->len); 537 rx_buf->page = NULL; 538 skb->len += rx_buf->len; 539 if (skb_shinfo(skb)->nr_frags == n_frags) 540 break; 541 542 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); 543 } 544 545 skb->data_len = skb->len; 546 skb->truesize += n_frags * efx->rx_buffer_truesize; 547 548 skb_record_rx_queue(skb, channel->rx_queue.core_index); 549 550 napi_gro_frags(napi); 551 } 552 553 /* RSS contexts. We're using linked lists and crappy O(n) algorithms, because 554 * (a) this is an infrequent control-plane operation and (b) n is small (max 64) 555 */ 556 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) 557 { 558 struct list_head *head = &efx->rss_context.list; 559 struct efx_rss_context *ctx, *new; 560 u32 id = 1; /* Don't use zero, that refers to the master RSS context */ 561 562 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 563 564 /* Search for first gap in the numbering */ 565 list_for_each_entry(ctx, head, list) { 566 if (ctx->user_id != id) 567 break; 568 id++; 569 /* Check for wrap. If this happens, we have nearly 2^32 570 * allocated RSS contexts, which seems unlikely. 571 */ 572 if (WARN_ON_ONCE(!id)) 573 return NULL; 574 } 575 576 /* Create the new entry */ 577 new = kmalloc(sizeof(*new), GFP_KERNEL); 578 if (!new) 579 return NULL; 580 new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; 581 new->rx_hash_udp_4tuple = false; 582 583 /* Insert the new entry into the gap */ 584 new->user_id = id; 585 list_add_tail(&new->list, &ctx->list); 586 return new; 587 } 588 589 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) 590 { 591 struct list_head *head = &efx->rss_context.list; 592 struct efx_rss_context *ctx; 593 594 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 595 596 list_for_each_entry(ctx, head, list) 597 if (ctx->user_id == id) 598 return ctx; 599 return NULL; 600 } 601 602 void efx_free_rss_context_entry(struct efx_rss_context *ctx) 603 { 604 list_del(&ctx->list); 605 kfree(ctx); 606 } 607 608 void efx_set_default_rx_indir_table(struct efx_nic *efx, 609 struct efx_rss_context *ctx) 610 { 611 size_t i; 612 613 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) 614 ctx->rx_indir_table[i] = 615 ethtool_rxfh_indir_default(i, efx->rss_spread); 616 } 617 618 /** 619 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient 620 * @spec: Specification to test 621 * 622 * Return: %true if the specification is a non-drop RX filter that 623 * matches a local MAC address I/G bit value of 1 or matches a local 624 * IPv4 or IPv6 address value in the respective multicast address 625 * range. Otherwise %false. 626 */ 627 bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) 628 { 629 if (!(spec->flags & EFX_FILTER_FLAG_RX) || 630 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) 631 return false; 632 633 if (spec->match_flags & 634 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) && 635 is_multicast_ether_addr(spec->loc_mac)) 636 return true; 637 638 if ((spec->match_flags & 639 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 640 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 641 if (spec->ether_type == htons(ETH_P_IP) && 642 ipv4_is_multicast(spec->loc_host[0])) 643 return true; 644 if (spec->ether_type == htons(ETH_P_IPV6) && 645 ((const u8 *)spec->loc_host)[0] == 0xff) 646 return true; 647 } 648 649 return false; 650 } 651 652 bool efx_filter_spec_equal(const struct efx_filter_spec *left, 653 const struct efx_filter_spec *right) 654 { 655 if ((left->match_flags ^ right->match_flags) | 656 ((left->flags ^ right->flags) & 657 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) 658 return false; 659 660 return memcmp(&left->outer_vid, &right->outer_vid, 661 sizeof(struct efx_filter_spec) - 662 offsetof(struct efx_filter_spec, outer_vid)) == 0; 663 } 664 665 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) 666 { 667 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); 668 return jhash2((const u32 *)&spec->outer_vid, 669 (sizeof(struct efx_filter_spec) - 670 offsetof(struct efx_filter_spec, outer_vid)) / 4, 671 0); 672 } 673 674 #ifdef CONFIG_RFS_ACCEL 675 bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, 676 bool *force) 677 { 678 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { 679 /* ARFS is currently updating this entry, leave it */ 680 return false; 681 } 682 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { 683 /* ARFS tried and failed to update this, so it's probably out 684 * of date. Remove the filter and the ARFS rule entry. 685 */ 686 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; 687 *force = true; 688 return true; 689 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ 690 /* ARFS has moved on, so old filter is not needed. Since we did 691 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will 692 * not be removed by efx_rps_hash_del() subsequently. 693 */ 694 *force = true; 695 return true; 696 } 697 /* Remove it iff ARFS wants to. */ 698 return true; 699 } 700 701 static 702 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, 703 const struct efx_filter_spec *spec) 704 { 705 u32 hash = efx_filter_spec_hash(spec); 706 707 lockdep_assert_held(&efx->rps_hash_lock); 708 if (!efx->rps_hash_table) 709 return NULL; 710 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; 711 } 712 713 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, 714 const struct efx_filter_spec *spec) 715 { 716 struct efx_arfs_rule *rule; 717 struct hlist_head *head; 718 struct hlist_node *node; 719 720 head = efx_rps_hash_bucket(efx, spec); 721 if (!head) 722 return NULL; 723 hlist_for_each(node, head) { 724 rule = container_of(node, struct efx_arfs_rule, node); 725 if (efx_filter_spec_equal(spec, &rule->spec)) 726 return rule; 727 } 728 return NULL; 729 } 730 731 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, 732 const struct efx_filter_spec *spec, 733 bool *new) 734 { 735 struct efx_arfs_rule *rule; 736 struct hlist_head *head; 737 struct hlist_node *node; 738 739 head = efx_rps_hash_bucket(efx, spec); 740 if (!head) 741 return NULL; 742 hlist_for_each(node, head) { 743 rule = container_of(node, struct efx_arfs_rule, node); 744 if (efx_filter_spec_equal(spec, &rule->spec)) { 745 *new = false; 746 return rule; 747 } 748 } 749 rule = kmalloc(sizeof(*rule), GFP_ATOMIC); 750 *new = true; 751 if (rule) { 752 memcpy(&rule->spec, spec, sizeof(rule->spec)); 753 hlist_add_head(&rule->node, head); 754 } 755 return rule; 756 } 757 758 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) 759 { 760 struct efx_arfs_rule *rule; 761 struct hlist_head *head; 762 struct hlist_node *node; 763 764 head = efx_rps_hash_bucket(efx, spec); 765 if (WARN_ON(!head)) 766 return; 767 hlist_for_each(node, head) { 768 rule = container_of(node, struct efx_arfs_rule, node); 769 if (efx_filter_spec_equal(spec, &rule->spec)) { 770 /* Someone already reused the entry. We know that if 771 * this check doesn't fire (i.e. filter_id == REMOVING) 772 * then the REMOVING mark was put there by our caller, 773 * because caller is holding a lock on filter table and 774 * only holders of that lock set REMOVING. 775 */ 776 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) 777 return; 778 hlist_del(node); 779 kfree(rule); 780 return; 781 } 782 } 783 /* We didn't find it. */ 784 WARN_ON(1); 785 } 786 #endif 787 788 int efx_probe_filters(struct efx_nic *efx) 789 { 790 int rc; 791 792 mutex_lock(&efx->mac_lock); 793 down_write(&efx->filter_sem); 794 rc = efx->type->filter_table_probe(efx); 795 if (rc) 796 goto out_unlock; 797 798 #ifdef CONFIG_RFS_ACCEL 799 if (efx->type->offload_features & NETIF_F_NTUPLE) { 800 struct efx_channel *channel; 801 int i, success = 1; 802 803 efx_for_each_channel(channel, efx) { 804 channel->rps_flow_id = 805 kcalloc(efx->type->max_rx_ip_filters, 806 sizeof(*channel->rps_flow_id), 807 GFP_KERNEL); 808 if (!channel->rps_flow_id) 809 success = 0; 810 else 811 for (i = 0; 812 i < efx->type->max_rx_ip_filters; 813 ++i) 814 channel->rps_flow_id[i] = 815 RPS_FLOW_ID_INVALID; 816 channel->rfs_expire_index = 0; 817 channel->rfs_filter_count = 0; 818 } 819 820 if (!success) { 821 efx_for_each_channel(channel, efx) 822 kfree(channel->rps_flow_id); 823 efx->type->filter_table_remove(efx); 824 rc = -ENOMEM; 825 goto out_unlock; 826 } 827 } 828 #endif 829 out_unlock: 830 up_write(&efx->filter_sem); 831 mutex_unlock(&efx->mac_lock); 832 return rc; 833 } 834 835 void efx_remove_filters(struct efx_nic *efx) 836 { 837 #ifdef CONFIG_RFS_ACCEL 838 struct efx_channel *channel; 839 840 efx_for_each_channel(channel, efx) { 841 cancel_delayed_work_sync(&channel->filter_work); 842 kfree(channel->rps_flow_id); 843 channel->rps_flow_id = NULL; 844 } 845 #endif 846 down_write(&efx->filter_sem); 847 efx->type->filter_table_remove(efx); 848 up_write(&efx->filter_sem); 849 } 850 851 #ifdef CONFIG_RFS_ACCEL 852 853 static void efx_filter_rfs_work(struct work_struct *data) 854 { 855 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, 856 work); 857 struct efx_nic *efx = netdev_priv(req->net_dev); 858 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); 859 int slot_idx = req - efx->rps_slot; 860 struct efx_arfs_rule *rule; 861 u16 arfs_id = 0; 862 int rc; 863 864 rc = efx->type->filter_insert(efx, &req->spec, true); 865 if (rc >= 0) 866 /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ 867 rc %= efx->type->max_rx_ip_filters; 868 if (efx->rps_hash_table) { 869 spin_lock_bh(&efx->rps_hash_lock); 870 rule = efx_rps_hash_find(efx, &req->spec); 871 /* The rule might have already gone, if someone else's request 872 * for the same spec was already worked and then expired before 873 * we got around to our work. In that case we have nothing 874 * tying us to an arfs_id, meaning that as soon as the filter 875 * is considered for expiry it will be removed. 876 */ 877 if (rule) { 878 if (rc < 0) 879 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; 880 else 881 rule->filter_id = rc; 882 arfs_id = rule->arfs_id; 883 } 884 spin_unlock_bh(&efx->rps_hash_lock); 885 } 886 if (rc >= 0) { 887 /* Remember this so we can check whether to expire the filter 888 * later. 889 */ 890 mutex_lock(&efx->rps_mutex); 891 if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) 892 channel->rfs_filter_count++; 893 channel->rps_flow_id[rc] = req->flow_id; 894 mutex_unlock(&efx->rps_mutex); 895 896 if (req->spec.ether_type == htons(ETH_P_IP)) 897 netif_info(efx, rx_status, efx->net_dev, 898 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", 899 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 900 req->spec.rem_host, ntohs(req->spec.rem_port), 901 req->spec.loc_host, ntohs(req->spec.loc_port), 902 req->rxq_index, req->flow_id, rc, arfs_id); 903 else 904 netif_info(efx, rx_status, efx->net_dev, 905 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", 906 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 907 req->spec.rem_host, ntohs(req->spec.rem_port), 908 req->spec.loc_host, ntohs(req->spec.loc_port), 909 req->rxq_index, req->flow_id, rc, arfs_id); 910 channel->n_rfs_succeeded++; 911 } else { 912 if (req->spec.ether_type == htons(ETH_P_IP)) 913 netif_dbg(efx, rx_status, efx->net_dev, 914 "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", 915 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 916 req->spec.rem_host, ntohs(req->spec.rem_port), 917 req->spec.loc_host, ntohs(req->spec.loc_port), 918 req->rxq_index, req->flow_id, rc, arfs_id); 919 else 920 netif_dbg(efx, rx_status, efx->net_dev, 921 "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", 922 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 923 req->spec.rem_host, ntohs(req->spec.rem_port), 924 req->spec.loc_host, ntohs(req->spec.loc_port), 925 req->rxq_index, req->flow_id, rc, arfs_id); 926 channel->n_rfs_failed++; 927 /* We're overloading the NIC's filter tables, so let's do a 928 * chunk of extra expiry work. 929 */ 930 __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, 931 100u)); 932 } 933 934 /* Release references */ 935 clear_bit(slot_idx, &efx->rps_slot_map); 936 dev_put(req->net_dev); 937 } 938 939 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 940 u16 rxq_index, u32 flow_id) 941 { 942 struct efx_nic *efx = netdev_priv(net_dev); 943 struct efx_async_filter_insertion *req; 944 struct efx_arfs_rule *rule; 945 struct flow_keys fk; 946 int slot_idx; 947 bool new; 948 int rc; 949 950 /* find a free slot */ 951 for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) 952 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) 953 break; 954 if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) 955 return -EBUSY; 956 957 if (flow_id == RPS_FLOW_ID_INVALID) { 958 rc = -EINVAL; 959 goto out_clear; 960 } 961 962 if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { 963 rc = -EPROTONOSUPPORT; 964 goto out_clear; 965 } 966 967 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { 968 rc = -EPROTONOSUPPORT; 969 goto out_clear; 970 } 971 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { 972 rc = -EPROTONOSUPPORT; 973 goto out_clear; 974 } 975 976 req = efx->rps_slot + slot_idx; 977 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, 978 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 979 rxq_index); 980 req->spec.match_flags = 981 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 982 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | 983 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; 984 req->spec.ether_type = fk.basic.n_proto; 985 req->spec.ip_proto = fk.basic.ip_proto; 986 987 if (fk.basic.n_proto == htons(ETH_P_IP)) { 988 req->spec.rem_host[0] = fk.addrs.v4addrs.src; 989 req->spec.loc_host[0] = fk.addrs.v4addrs.dst; 990 } else { 991 memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, 992 sizeof(struct in6_addr)); 993 memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, 994 sizeof(struct in6_addr)); 995 } 996 997 req->spec.rem_port = fk.ports.src; 998 req->spec.loc_port = fk.ports.dst; 999 1000 if (efx->rps_hash_table) { 1001 /* Add it to ARFS hash table */ 1002 spin_lock(&efx->rps_hash_lock); 1003 rule = efx_rps_hash_add(efx, &req->spec, &new); 1004 if (!rule) { 1005 rc = -ENOMEM; 1006 goto out_unlock; 1007 } 1008 if (new) 1009 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; 1010 rc = rule->arfs_id; 1011 /* Skip if existing or pending filter already does the right thing */ 1012 if (!new && rule->rxq_index == rxq_index && 1013 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) 1014 goto out_unlock; 1015 rule->rxq_index = rxq_index; 1016 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; 1017 spin_unlock(&efx->rps_hash_lock); 1018 } else { 1019 /* Without an ARFS hash table, we just use arfs_id 0 for all 1020 * filters. This means if multiple flows hash to the same 1021 * flow_id, all but the most recently touched will be eligible 1022 * for expiry. 1023 */ 1024 rc = 0; 1025 } 1026 1027 /* Queue the request */ 1028 dev_hold(req->net_dev = net_dev); 1029 INIT_WORK(&req->work, efx_filter_rfs_work); 1030 req->rxq_index = rxq_index; 1031 req->flow_id = flow_id; 1032 schedule_work(&req->work); 1033 return rc; 1034 out_unlock: 1035 spin_unlock(&efx->rps_hash_lock); 1036 out_clear: 1037 clear_bit(slot_idx, &efx->rps_slot_map); 1038 return rc; 1039 } 1040 1041 bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) 1042 { 1043 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); 1044 struct efx_nic *efx = channel->efx; 1045 unsigned int index, size, start; 1046 u32 flow_id; 1047 1048 if (!mutex_trylock(&efx->rps_mutex)) 1049 return false; 1050 expire_one = efx->type->filter_rfs_expire_one; 1051 index = channel->rfs_expire_index; 1052 start = index; 1053 size = efx->type->max_rx_ip_filters; 1054 while (quota) { 1055 flow_id = channel->rps_flow_id[index]; 1056 1057 if (flow_id != RPS_FLOW_ID_INVALID) { 1058 quota--; 1059 if (expire_one(efx, flow_id, index)) { 1060 netif_info(efx, rx_status, efx->net_dev, 1061 "expired filter %d [channel %u flow %u]\n", 1062 index, channel->channel, flow_id); 1063 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; 1064 channel->rfs_filter_count--; 1065 } 1066 } 1067 if (++index == size) 1068 index = 0; 1069 /* If we were called with a quota that exceeds the total number 1070 * of filters in the table (which shouldn't happen, but could 1071 * if two callers race), ensure that we don't loop forever - 1072 * stop when we've examined every row of the table. 1073 */ 1074 if (index == start) 1075 break; 1076 } 1077 1078 channel->rfs_expire_index = index; 1079 mutex_unlock(&efx->rps_mutex); 1080 return true; 1081 } 1082 1083 #endif /* CONFIG_RFS_ACCEL */ 1084