/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
				  efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
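
/* Illustrative arithmetic only, with assumed values (the real numbers depend
 * on PAGE_SIZE, rx_dma_len, rx_ip_align and EFX_RX_BUF_ALIGNMENT): with 4 KiB
 * pages, order-0 buffers, a 16-byte struct efx_rx_page_state and a 1792-byte
 * rx_page_buf_step, efx_rx_config_page_split() above gives
 *	rx_bufs_per_page   = (4096 - 16) / 1792	= 2
 *	rx_buffer_truesize = 4096 / 2		= 2048
 *	rx_pages_per_batch = DIV_ROUND_UP(8, 2)	= 4
 */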

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:	Efx RX queue
 * @atomic:	control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Return a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
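
/* Layout of a page prepared by efx_init_rx_buffers() (order-0 case, shown
 * here with two buffers per page purely as an illustration; the real count
 * comes from efx_rx_config_page_split()):
 *
 *   [ struct efx_rx_page_state | pad(rx_ip_align) buffer 0 ... |
 *     pad(rx_ip_align) buffer 1 ... | unused tail ]
 *
 * Each buffer occupies rx_page_buf_step bytes and takes its own page
 * reference via get_page(); the last buffer in the page carries
 * EFX_RX_BUF_LAST_IN_PAGE.  efx_reuse_page() will only hand the page out
 * again once page_count() has dropped back to 1.
 */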

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
				struct efx_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
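
/* The fast-fill path below refills in whole batches of
 * rx_pages_per_batch * rx_bufs_per_page buffers.  Worked example with assumed
 * values (4 pages per batch, 2 buffers per page, so batch_size = 8): if
 * max_fill is 1016 and fill_level is 984, space is 32, so four batches
 * (32 buffers) are pushed before the NIC is notified of the new descriptors.
 */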

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 * @atomic:	control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);


	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
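
/* efx_rx_mk_skb() above copies at most EFX_SKB_HEADERS (128) bytes of the
 * packet (plus the hardware RX prefix) into the skb's linear area; whatever
 * remains stays in the DMA-mapped pages and is attached as page fragments.
 * A worked example, assuming a 1000-byte frame in a single fragment:
 * hdr_len = min(1000, 128) = 128 bytes are memcpy'd, and the remaining
 * 872 bytes are added with skb_fill_page_desc().
 */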

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}
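
/* Receive processing is split into two halves.  efx_rx_packet() above is the
 * first half: it validates and DMA-syncs the fragments, then only records
 * rx_pkt_index/rx_pkt_n_frags on the channel.  __efx_rx_packet() below is the
 * second half and runs via efx_rx_flush_packet() - for example from
 * efx_rx_packet() itself just before it records a new packet, and likewise
 * when the channel's event processing finishes a poll.  The delay gives the
 * prefetch issued above time to pull the packet headers into cache before
 * they are touched.
 */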

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
	    !efx_channel_busy_polling(channel))
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}
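
/* Recycle ring sizing example (illustrative, assuming two buffers per page):
 *	with an IOMMU:    4096 buffers / 2 = 2048 pages -> ring of 2048 slots
 *	without an IOMMU:   16 buffers / 2 =    8 pages -> ring of    8 slots
 * DMA unmapping is expensive behind an IOMMU, so a much larger ring is used
 * there to keep pages mapped and recycled rather than remapped.
 */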

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring.  Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
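
/* Note on rx_refill_threshold: a value of 0 (the default) refills whenever the
 * fill level drops below max_trigger, i.e. max_fill minus one batch of
 * buffers.  A non-zero value sets the trigger to that percentage of max_fill,
 * capped at max_trigger; for example (illustrative numbers), with max_fill of
 * 1016 and rx_refill_threshold of 50, the queue is refilled once the fill
 * level falls below 508 buffers.
 */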

#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	struct flow_keys fk;
	int rc;

	if (flow_id == RPS_FLOW_ID_INVALID)
		return -EINVAL;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	spec.ether_type = fk.basic.n_proto;
	spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		spec.rem_host[0] = fk.addrs.v4addrs.src;
		spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
	}

	spec.rem_port = fk.ports.src;
	spec.loc_port = fk.ports.dst;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	channel = efx_get_channel(efx, rxq_index);
	channel->rps_flow_id[rc] = flow_id;
	++channel->rfs_filters_added;

	if (spec.ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);

	return rc;
}

bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct efx_channel *channel = efx_get_channel(efx, channel_idx);
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */
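
/* __efx_filter_rfs_expire() above walks the installed RFS filters in a simple
 * round-robin: it resumes from (rps_expire_channel, rps_expire_index), checks
 * at most 'quota' filter slots per call, and wraps from the last slot of one
 * channel to slot 0 of the next.  A slot is cleared only when the NIC-type
 * filter_rfs_expire_one() callback returns true for it; if the filter lock is
 * contended, the scan is simply skipped until the next invocation.
 */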

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}