/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}
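/* Each order-0 receive page starts with a struct efx_rx_page_state header
 * and is then carved into rx_bufs_per_page buffers of rx_page_buf_step
 * bytes each (the DMA length plus IP header alignment, rounded up to
 * EFX_RX_BUF_ALIGNMENT).  Higher-order pages hold a single buffer.
 */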
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create a batch of page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Return a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
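/* Reference counting: efx_init_rx_buffers() takes one page reference per
 * buffer carved out of a page, and all buffers in a page share one DMA
 * mapping.  A page can therefore only be reused from the recycle ring
 * once every buffer reference has been dropped, which is why
 * efx_reuse_page() requires page_count() == 1.
 */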
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}
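/* Pages recycled by the helpers above re-enter circulation through
 * efx_reuse_page() on the refill path.  Because only the last buffer in a
 * page is pushed into the recycle ring, a page is never referenced by the
 * descriptor ring and the recycle ring at the same time.
 */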
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(efx, eh);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}
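/* Handle a received packet.  First half: validates the fragment count and
 * completed length, syncs the DMA mappings and recycles the pages, then
 * leaves the packet pending on the channel so that __efx_rx_packet() can
 * deliver it once the header prefetch has had time to complete.
 */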
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}
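/* Delivery path selection: packets that the hardware has flagged as TCP go
 * through napi_gro_frags() so the stack can coalesce them, while everything
 * else (and any channel type that supplies its own receive_skb handler) is
 * built into a normal skb by efx_rx_deliver().
 */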
/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}
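/* max_fill leaves EFX_RXD_HEAD_ROOM entries of slack so the descriptor
 * ring cannot overflow while a pipelined receive is outstanding.  The
 * fast-fill trigger defaults to one allocation batch below max_fill, or
 * to the rx_refill_threshold percentage when that module parameter is
 * set (capped at the same batch headroom).
 */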
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring.  Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

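/* efx_filter_rfs() backs the driver's accelerated-RFS (ndo_rx_flow_steer)
 * path: it builds a 5-tuple RX filter from a packet that arrived on the
 * wrong queue and asks the NIC to steer the flow to rxq_index.  The
 * returned filter index is recorded in rps_flow_id[] so that
 * __efx_filter_rfs_expire() can hand the flow ID back to the NIC-type code
 * when scanning for stale filters.
 */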
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const __be16 *ports;
	__be16 ether_type;
	int nhoff;
	int rc;

	/* The core RPS/RFS code has already parsed and validated
	 * VLAN, IP and transport headers.  We assume they are in the
	 * header area.
	 */

	if (skb->protocol == htons(ETH_P_8021Q)) {
		const struct vlan_hdr *vh =
			(const struct vlan_hdr *)skb->data;

		/* We can't filter on the IP 5-tuple and the vlan
		 * together, so just strip the vlan header and filter
		 * on the IP part.
		 */
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
		ether_type = vh->h_vlan_encapsulated_proto;
		nhoff = sizeof(struct vlan_hdr);
	} else {
		ether_type = skb->protocol;
		nhoff = 0;
	}

	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	spec.ether_type = ether_type;

	if (ether_type == htons(ETH_P_IP)) {
		const struct iphdr *ip =
			(const struct iphdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
		if (ip_is_fragment(ip))
			return -EPROTONOSUPPORT;
		spec.ip_proto = ip->protocol;
		spec.rem_host[0] = ip->saddr;
		spec.loc_host[0] = ip->daddr;
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
	} else {
		const struct ipv6hdr *ip6 =
			(const struct ipv6hdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(*ip6) + 4);
		spec.ip_proto = ip6->nexthdr;
		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
		ports = (const __be16 *)(ip6 + 1);
	}

	spec.rem_port = ports[0];
	spec.loc_port = ports[1];

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	if (ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);

	return rc;
}

bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [flow %u]\n",
				   index, flow_id);
		if (++index == size)
			index = 0;
	}
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */
/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}