/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use, the second method has a lower overhead, since we
 * don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
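
/* Worked example of the hysteresis above (illustrative numbers only):
 * starting from rx_alloc_level = 0, every packet that GRO merges adds
 * RX_ALLOC_FACTOR_GRO (+1) and every packet delivered as a plain skb adds
 * RX_ALLOC_FACTOR_SKB (-2).  A sustained GRO-friendly stream therefore
 * needs on the order of 0x2000 (8192) merged packets before the level
 * crosses RX_ALLOC_LEVEL_GRO and efx_rx_strategy() switches the channel to
 * page-based allocation, while even a modest share of non-GRO traffic pulls
 * the level back down twice as fast and flips the channel back to skbs.
 */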

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
		efx->type->rx_buffer_hash_size;
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->flags & EFX_RX_BUF_PAGE)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one.  Returns a negative error code or 0
 * on success.  May fail having inserted fewer than EFX_RX_BATCH buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->flags = 0;

		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
						  skb->data, rx_buf->len,
						  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
					       rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}
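
/* When rx_buffer_len <= EFX_RX_HALF_PAGE, the page-based allocator below
 * shares a single page between two RX buffers.  Illustrative layout (exact
 * offsets depend on PAGE_SIZE and sizeof(struct efx_rx_page_state)):
 *
 *   page + 0:
 *       struct efx_rx_page_state (refcnt, dma_addr)
 *   page + sizeof(state) + EFX_PAGE_IP_ALIGN:
 *       buffer 0 data
 *   page + sizeof(state) + (PAGE_SIZE >> 1) + EFX_PAGE_IP_ALIGN:
 *       buffer 1 data
 */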

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one.  Returns a negative error
 * code or 0 on success.  If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
					efx_rx_buf_size(efx),
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->flags = EFX_RX_BUF_PAGE;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
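
/* Buffer teardown happens in two steps: efx_unmap_rx_buffer() releases the
 * DMA mapping and efx_free_rx_buffer() releases the memory.  For page-based
 * buffers the mapping covers the whole (possibly shared) page, so it is only
 * unmapped once state->refcnt (one count per buffer carved out of the page
 * above) drops to zero; skb-based buffers are unmapped individually.
 */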

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
		if (--state->refcnt == 0) {
			dma_unmap_page(&efx->pci_dev->dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       DMA_FROM_DEVICE);
		}
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
				 rx_buf->len, DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count.  +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors().  For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->flags = EFX_RX_BUF_PAGE;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue.  There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

	rx_buf->flags &= EFX_RX_BUF_PAGE;

	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_fill_page_desc(skb, 0, page,
				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
		rx_buf->u.skb = NULL;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel, channel->rx_pkt);
	channel->rx_pkt = rx_buf;
}

static void efx_rx_deliver(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf)
{
	struct sk_buff *skb;

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
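
/* Receive handling is split into two stages.  efx_rx_packet() above runs as
 * soon as the completion event is seen: it validates the length, unmaps the
 * buffer, prefetches the ethernet header and then parks the buffer in
 * channel->rx_pkt.  The buffer is only pushed up the stack by
 * __efx_rx_packet() below (via GRO or efx_rx_deliver()) when the next
 * completion is handled, giving the prefetched headers time to reach the
 * cache.
 */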

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
		struct sk_buff *skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header.  skb->data still points
		 * at the ethernet header here */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
		efx_rx_packet_gro(channel, rx_buf, eh);
	else
		efx_rx_deliver(channel, rx_buf);
}
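
/* Note how the allocation heuristic closes its loop: efx_rx_packet_gro() and
 * efx_rx_deliver() above nudge channel->rx_alloc_level up by
 * RX_ALLOC_FACTOR_GRO when GRO successfully merges a packet, and down by
 * RX_ALLOC_FACTOR_SKB when a packet goes up the stack as a plain skb.  The
 * function below then clamps the level and turns it into the per-channel
 * choice between page-based and skb-based buffers.
 */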

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers.  NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
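
/* Worked example for rx_refill_threshold (illustrative numbers): with
 * rxq_entries = 512, efx_init_rx_queue() computes max_fill = 512 -
 * EFX_RXD_HEAD_ROOM = 510 and max_trigger = 510 - EFX_RX_BATCH = 502.
 * Loading with rx_refill_threshold=90 (e.g. "modprobe sfc
 * rx_refill_threshold=90", assuming the driver is built as the sfc module)
 * then gives a fast-fill trigger of 510 * 90 / 100 = 459, so the ring is
 * topped up whenever its fill level drops below that.
 */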