Lines matching refs:rx_ring — references to rx_ring in the Wangxun libwx Ethernet driver's RX code. Each entry gives the source line number, the matching line, and the enclosing function; entries marked "argument" are lines where rx_ring appears as a function parameter.

163 static void wx_dma_sync_frag(struct wx_ring *rx_ring,  in wx_dma_sync_frag()  argument
169 dma_sync_single_range_for_cpu(rx_ring->dev, in wx_dma_sync_frag()
177 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); in wx_dma_sync_frag()
180 static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, in wx_get_rx_buffer() argument
188 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in wx_get_rx_buffer()
209 wx_dma_sync_frag(rx_ring, rx_buffer); in wx_get_rx_buffer()
213 dma_sync_single_range_for_cpu(rx_ring->dev, in wx_get_rx_buffer()
222 static void wx_put_rx_buffer(struct wx_ring *rx_ring, in wx_put_rx_buffer() argument
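The entries above cover the per-buffer hand-off on receive: wx_dma_sync_frag() makes an already-attached fragment CPU-visible and can return its page to the pool, wx_get_rx_buffer() looks up the buffer at next_to_clean and syncs the newly received bytes, and wx_put_rx_buffer() releases the buffer once the skb owns the data. A minimal sketch of the lookup-and-sync step follows; the "size" parameter (taken from the descriptor) and the rx_buffer->page_offset field are assumptions, only the fields visible in the listing are certain.

    /* Sketch only: fetch the current RX buffer and make its payload
     * CPU-visible before the stack reads it. rx_buffer->page_offset is
     * an assumed field name; "size" would come from the RX descriptor.
     */
    static struct wx_rx_buffer *wx_get_rx_buffer_sketch(struct wx_ring *rx_ring,
                                                        unsigned int size)
    {
            struct wx_rx_buffer *rx_buffer;

            rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

            dma_sync_single_range_for_cpu(rx_ring->dev,
                                          rx_buffer->dma,
                                          rx_buffer->page_offset,
                                          size,
                                          DMA_FROM_DEVICE);
            return rx_buffer;
    }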
236 static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, in wx_build_skb() argument
259 skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256); in wx_build_skb()
272 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); in wx_build_skb()
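wx_build_skb() allocates a small skb from the NAPI cache (WX_RXBUFFER_256 bytes) and, per line 272, can recycle the page immediately with the "direct" flag set, which is only safe from NAPI context. That combination looks like a copy-small-frames path; the exact condition is not in the listing, so the sketch below is a guess at the shape. page_address(), skb_put_data(), the "size" argument, and rx_buffer->page_offset are assumptions.

    /* Sketch of a copybreak-style path, assuming the caller verified
     * size <= WX_RXBUFFER_256: copy the frame into a fresh NAPI skb and
     * recycle the page right away (direct recycling is safe in softirq).
     */
    static struct sk_buff *wx_build_small_skb_sketch(struct wx_ring *rx_ring,
                                                     struct wx_rx_buffer *rx_buffer,
                                                     unsigned int size)
    {
            void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
            struct sk_buff *skb;

            skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
            if (unlikely(!skb))
                    return NULL;

            skb_put_data(skb, va, size);
            page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);

            return skb;
    }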
303 static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, in wx_alloc_mapped_page() argument
313 page = page_pool_dev_alloc_pages(rx_ring->page_pool); in wx_alloc_mapped_page()
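wx_alloc_mapped_page() pulls a fresh page from the ring's page pool. The listing only confirms the page_pool_dev_alloc_pages() call; recording the DMA address via page_pool_get_dma_addr() (which requires the pool to have been created with PP_FLAG_DMA_MAP) and the bi->page_offset field are assumptions in the sketch below, motivated by bi->dma being synced later at line 348.

    /* Sketch: populate one wx_rx_buffer from the page pool. The pool is
     * assumed to own the DMA mapping (PP_FLAG_DMA_MAP), so
     * page_pool_get_dma_addr() returns a valid address.
     */
    static bool wx_alloc_mapped_page_sketch(struct wx_ring *rx_ring,
                                            struct wx_rx_buffer *bi)
    {
            struct page *page;

            if (likely(bi->page))
                    return true;    /* buffer already has a page */

            page = page_pool_dev_alloc_pages(rx_ring->page_pool);
            if (unlikely(!page))
                    return false;

            bi->page = page;
            bi->dma = page_pool_get_dma_addr(page);
            bi->page_offset = 0;    /* assumed field */

            return true;
    }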
329 void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) in wx_alloc_rx_buffers() argument
331 u16 i = rx_ring->next_to_use; in wx_alloc_rx_buffers()
339 rx_desc = WX_RX_DESC(rx_ring, i); in wx_alloc_rx_buffers()
340 bi = &rx_ring->rx_buffer_info[i]; in wx_alloc_rx_buffers()
341 i -= rx_ring->count; in wx_alloc_rx_buffers()
344 if (!wx_alloc_mapped_page(rx_ring, bi)) in wx_alloc_rx_buffers()
348 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in wx_alloc_rx_buffers()
360 rx_desc = WX_RX_DESC(rx_ring, 0); in wx_alloc_rx_buffers()
361 bi = rx_ring->rx_buffer_info; in wx_alloc_rx_buffers()
362 i -= rx_ring->count; in wx_alloc_rx_buffers()
371 i += rx_ring->count; in wx_alloc_rx_buffers()
373 if (rx_ring->next_to_use != i) { in wx_alloc_rx_buffers()
374 rx_ring->next_to_use = i; in wx_alloc_rx_buffers()
376 rx_ring->next_to_alloc = i; in wx_alloc_rx_buffers()
384 writel(i, rx_ring->tail); in wx_alloc_rx_buffers()
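wx_alloc_rx_buffers() is the refill loop. Note the index trick on lines 341, 362 and 371: i is biased by subtracting the ring count so the wrap test reduces to "i == 0", and the bias is undone before the index is written back. The final writel() to the tail register is the doorbell telling the NIC which descriptors are ready. A condensed sketch follows; the descriptor field read.pkt_addr, the buffer length WX_RX_BUFSZ, and the dma_wmb() placement are assumptions, the rest mirrors the listing.

    /* Sketch of the refill loop: allocate pages, sync them for the
     * device, publish the DMA address in the descriptor, then ring the
     * tail doorbell.
     */
    void wx_alloc_rx_buffers_sketch(struct wx_ring *rx_ring, u16 cleaned_count)
    {
            u16 i = rx_ring->next_to_use;
            union wx_rx_desc *rx_desc;
            struct wx_rx_buffer *bi;

            if (!cleaned_count)
                    return;

            rx_desc = WX_RX_DESC(rx_ring, i);
            bi = &rx_ring->rx_buffer_info[i];
            i -= rx_ring->count;            /* bias so the wrap test is "i == 0" */

            do {
                    if (!wx_alloc_mapped_page(rx_ring, bi))
                            break;

                    dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                                     bi->page_offset,
                                                     WX_RX_BUFSZ,
                                                     DMA_FROM_DEVICE);

                    /* hand the buffer address to the hardware (assumed field) */
                    rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                    rx_desc++;
                    bi++;
                    i++;
                    if (unlikely(!i)) {     /* wrapped past the end of the ring */
                            rx_desc = WX_RX_DESC(rx_ring, 0);
                            bi = rx_ring->rx_buffer_info;
                            i -= rx_ring->count;
                    }
            } while (--cleaned_count);

            i += rx_ring->count;            /* undo the bias */

            if (rx_ring->next_to_use != i) {
                    rx_ring->next_to_use = i;
                    rx_ring->next_to_alloc = i;

                    /* descriptor writes must be visible before the doorbell */
                    dma_wmb();
                    writel(i, rx_ring->tail);
            }
    }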
407 static bool wx_is_non_eop(struct wx_ring *rx_ring, in wx_is_non_eop() argument
411 u32 ntc = rx_ring->next_to_clean + 1; in wx_is_non_eop()
414 ntc = (ntc < rx_ring->count) ? ntc : 0; in wx_is_non_eop()
415 rx_ring->next_to_clean = ntc; in wx_is_non_eop()
417 prefetch(WX_RX_DESC(rx_ring, ntc)); in wx_is_non_eop()
423 rx_ring->rx_buffer_info[ntc].skb = skb; in wx_is_non_eop()
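wx_is_non_eop() advances next_to_clean, prefetches the next descriptor, and when the current descriptor is not the end of the packet it parks the in-progress skb in the next buffer's ->skb slot so the following iteration can keep appending fragments. A sketch; the end-of-packet test (wx_test_staterr()/WX_RXD_STAT_EOP) is an assumed name.

    /* Sketch: advance the ring and report whether more descriptors
     * belong to the current frame.
     */
    static bool wx_is_non_eop_sketch(struct wx_ring *rx_ring,
                                     union wx_rx_desc *rx_desc,
                                     struct sk_buff *skb)
    {
            u32 ntc = rx_ring->next_to_clean + 1;

            ntc = (ntc < rx_ring->count) ? ntc : 0;
            rx_ring->next_to_clean = ntc;

            prefetch(WX_RX_DESC(rx_ring, ntc));

            /* assumed names for the EOP status check */
            if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
                    return false;   /* frame is complete */

            /* more fragments follow: stash the skb for the next pass */
            rx_ring->rx_buffer_info[ntc].skb = skb;

            return true;
    }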
473 static bool wx_cleanup_headers(struct wx_ring *rx_ring, in wx_cleanup_headers() argument
477 struct net_device *netdev = rx_ring->netdev; in wx_cleanup_headers()
597 static void wx_process_skb_fields(struct wx_ring *rx_ring, in wx_process_skb_fields() argument
601 wx_rx_hash(rx_ring, rx_desc, skb); in wx_process_skb_fields()
602 wx_rx_checksum(rx_ring, rx_desc, skb); in wx_process_skb_fields()
603 wx_rx_vlan(rx_ring, rx_desc, skb); in wx_process_skb_fields()
604 skb_record_rx_queue(skb, rx_ring->queue_index); in wx_process_skb_fields()
605 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in wx_process_skb_fields()
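wx_process_skb_fields() fills in the metadata the stack expects before the skb leaves the driver: RSS hash, checksum status, VLAN tag, the RX queue index, and finally the protocol via eth_type_trans(), which must come last because it consumes the Ethernet header. A sketch that simply mirrors the listing; the helper bodies are not shown here.

    /* Sketch: per-packet metadata, in the order the listing shows. */
    static void wx_process_skb_fields_sketch(struct wx_ring *rx_ring,
                                             union wx_rx_desc *rx_desc,
                                             struct sk_buff *skb)
    {
            wx_rx_hash(rx_ring, rx_desc, skb);      /* RSS hash -> skb->hash */
            wx_rx_checksum(rx_ring, rx_desc, skb);  /* checksum offload status */
            wx_rx_vlan(rx_ring, rx_desc, skb);      /* hw-accelerated VLAN tag */

            skb_record_rx_queue(skb, rx_ring->queue_index);
            skb->protocol = eth_type_trans(skb, rx_ring->netdev);
    }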
622 struct wx_ring *rx_ring, in wx_clean_rx_irq() argument
626 u16 cleaned_count = wx_desc_unused(rx_ring); in wx_clean_rx_irq()
636 wx_alloc_rx_buffers(rx_ring, cleaned_count); in wx_clean_rx_irq()
640 rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean); in wx_clean_rx_irq()
650 rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt); in wx_clean_rx_irq()
653 skb = wx_build_skb(rx_ring, rx_buffer, rx_desc); in wx_clean_rx_irq()
660 wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); in wx_clean_rx_irq()
664 if (wx_is_non_eop(rx_ring, rx_desc, skb)) in wx_clean_rx_irq()
668 if (wx_cleanup_headers(rx_ring, rx_desc, skb)) in wx_clean_rx_irq()
675 wx_process_skb_fields(rx_ring, rx_desc, skb); in wx_clean_rx_irq()
682 u64_stats_update_begin(&rx_ring->syncp); in wx_clean_rx_irq()
683 rx_ring->stats.packets += total_rx_packets; in wx_clean_rx_irq()
684 rx_ring->stats.bytes += total_rx_bytes; in wx_clean_rx_irq()
685 u64_stats_update_end(&rx_ring->syncp); in wx_clean_rx_irq()
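wx_clean_rx_irq() is the NAPI poll body for one RX ring: refill once enough descriptors have been consumed, fetch and sync the buffer, build or extend the skb, loop again on multi-descriptor frames, then account the totals under the u64_stats seqcount so 32-bit readers see a consistent 64-bit pair. A skeleton of that control flow; the struct wx_q_vector type name, the WX_RX_BUFFER_WRITE batch threshold, the descriptor-done test, and the napi_gro_receive() hand-off are assumptions, while the helper calls follow the listing.

    /* Skeleton of the RX poll loop, not the driver's exact code. */
    static int wx_clean_rx_irq_sketch(struct wx_q_vector *q_vector,
                                      struct wx_ring *rx_ring, int budget)
    {
            unsigned int total_rx_bytes = 0, total_rx_packets = 0;
            u16 cleaned_count = wx_desc_unused(rx_ring);
            struct wx_rx_buffer *rx_buffer;
            union wx_rx_desc *rx_desc;
            struct sk_buff *skb;
            int rx_buffer_pgcnt;

            while (likely(total_rx_packets < budget)) {
                    /* return a batch of buffers to the hardware */
                    if (cleaned_count >= WX_RX_BUFFER_WRITE) {
                            wx_alloc_rx_buffers(rx_ring, cleaned_count);
                            cleaned_count = 0;
                    }

                    rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean);
                    if (!wx_rx_desc_done(rx_desc))  /* assumed DD check */
                            break;

                    rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb,
                                                 &rx_buffer_pgcnt);
                    skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
                    if (!skb)
                            break;

                    wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
                    cleaned_count++;

                    if (wx_is_non_eop(rx_ring, rx_desc, skb))
                            continue;       /* frame spans more descriptors */

                    if (wx_cleanup_headers(rx_ring, rx_desc, skb))
                            continue;       /* bad frame was dropped */

                    total_rx_bytes += skb->len;
                    wx_process_skb_fields(rx_ring, rx_desc, skb);
                    napi_gro_receive(&q_vector->napi, skb);
                    total_rx_packets++;
            }

            /* writer side of the syncp-protected counters (lines 682-685) */
            u64_stats_update_begin(&rx_ring->syncp);
            rx_ring->stats.packets += total_rx_packets;
            rx_ring->stats.bytes += total_rx_bytes;
            u64_stats_update_end(&rx_ring->syncp);

            return total_rx_packets;
    }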
1677 wx->rx_ring[i]->reg_idx = i; in wx_cache_ring_rss()
1790 wx->rx_ring[rxr_idx] = ring; in wx_alloc_q_vector()
1821 wx->rx_ring[ring->queue_index] = NULL; in wx_free_q_vector()
2148 static void wx_clean_rx_ring(struct wx_ring *rx_ring) in wx_clean_rx_ring() argument
2151 u16 i = rx_ring->next_to_clean; in wx_clean_rx_ring()
2153 rx_buffer = &rx_ring->rx_buffer_info[i]; in wx_clean_rx_ring()
2156 while (i != rx_ring->next_to_alloc) { in wx_clean_rx_ring()
2161 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); in wx_clean_rx_ring()
2169 dma_sync_single_range_for_cpu(rx_ring->dev, in wx_clean_rx_ring()
2176 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); in wx_clean_rx_ring()
2180 if (i == rx_ring->count) { in wx_clean_rx_ring()
2182 rx_buffer = rx_ring->rx_buffer_info; in wx_clean_rx_ring()
2186 rx_ring->next_to_alloc = 0; in wx_clean_rx_ring()
2187 rx_ring->next_to_clean = 0; in wx_clean_rx_ring()
2188 rx_ring->next_to_use = 0; in wx_clean_rx_ring()
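wx_clean_rx_ring() walks from next_to_clean to next_to_alloc, disposing of any half-assembled skb, syncing and returning every outstanding page to the pool, and finally resetting all three ring indices. A compressed sketch; freeing a partial skb with dev_kfree_skb(), the page_offset field, and WX_RX_BUFSZ are assumptions.

    /* Sketch: release every buffer still owned by the hardware and reset
     * the ring indices so the ring can be refilled from scratch.
     */
    static void wx_clean_rx_ring_sketch(struct wx_ring *rx_ring)
    {
            struct wx_rx_buffer *rx_buffer;
            u16 i = rx_ring->next_to_clean;

            rx_buffer = &rx_ring->rx_buffer_info[i];

            while (i != rx_ring->next_to_alloc) {
                    if (rx_buffer->skb) {
                            dev_kfree_skb(rx_buffer->skb);  /* partial frame */
                            rx_buffer->skb = NULL;
                    }

                    dma_sync_single_range_for_cpu(rx_ring->dev,
                                                  rx_buffer->dma,
                                                  rx_buffer->page_offset,
                                                  WX_RX_BUFSZ,
                                                  DMA_FROM_DEVICE);

                    page_pool_put_full_page(rx_ring->page_pool,
                                            rx_buffer->page, false);

                    i++;
                    rx_buffer++;
                    if (i == rx_ring->count) {
                            i = 0;
                            rx_buffer = rx_ring->rx_buffer_info;
                    }
            }

            rx_ring->next_to_alloc = 0;
            rx_ring->next_to_clean = 0;
            rx_ring->next_to_use = 0;
    }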
2200 wx_clean_rx_ring(wx->rx_ring[i]); in wx_clean_all_rx_rings()
2210 static void wx_free_rx_resources(struct wx_ring *rx_ring) in wx_free_rx_resources() argument
2212 wx_clean_rx_ring(rx_ring); in wx_free_rx_resources()
2213 kvfree(rx_ring->rx_buffer_info); in wx_free_rx_resources()
2214 rx_ring->rx_buffer_info = NULL; in wx_free_rx_resources()
2217 if (!rx_ring->desc) in wx_free_rx_resources()
2220 dma_free_coherent(rx_ring->dev, rx_ring->size, in wx_free_rx_resources()
2221 rx_ring->desc, rx_ring->dma); in wx_free_rx_resources()
2223 rx_ring->desc = NULL; in wx_free_rx_resources()
2225 if (rx_ring->page_pool) { in wx_free_rx_resources()
2226 page_pool_destroy(rx_ring->page_pool); in wx_free_rx_resources()
2227 rx_ring->page_pool = NULL; in wx_free_rx_resources()
2242 wx_free_rx_resources(wx->rx_ring[i]); in wx_free_all_rx_resources()
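Teardown order in wx_free_rx_resources() is the inverse of setup: drain the ring, free the software rx_buffer_info array, free the coherent descriptor memory, and destroy the page pool last, once no buffer can still reference it. The sketch below mirrors the order shown in the listing.

    /* Sketch of the per-ring teardown, following the listing's order. */
    static void wx_free_rx_resources_sketch(struct wx_ring *rx_ring)
    {
            wx_clean_rx_ring(rx_ring);              /* give pages back first */

            kvfree(rx_ring->rx_buffer_info);
            rx_ring->rx_buffer_info = NULL;

            if (rx_ring->desc) {
                    dma_free_coherent(rx_ring->dev, rx_ring->size,
                                      rx_ring->desc, rx_ring->dma);
                    rx_ring->desc = NULL;
            }

            if (rx_ring->page_pool) {
                    page_pool_destroy(rx_ring->page_pool);
                    rx_ring->page_pool = NULL;
            }
    }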
2363 static int wx_alloc_page_pool(struct wx_ring *rx_ring) in wx_alloc_page_pool() argument
2370 .pool_size = rx_ring->size, in wx_alloc_page_pool()
2371 .nid = dev_to_node(rx_ring->dev), in wx_alloc_page_pool()
2372 .dev = rx_ring->dev, in wx_alloc_page_pool()
2378 rx_ring->page_pool = page_pool_create(&pp_params); in wx_alloc_page_pool()
2379 if (IS_ERR(rx_ring->page_pool)) { in wx_alloc_page_pool()
2380 ret = PTR_ERR(rx_ring->page_pool); in wx_alloc_page_pool()
2381 rx_ring->page_pool = NULL; in wx_alloc_page_pool()
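wx_alloc_page_pool() creates one page pool per RX ring, sized to the ring and pinned to the device's NUMA node. page_pool_create() returns an ERR_PTR on failure, hence the IS_ERR()/PTR_ERR() handling and the NULL reset so a later teardown does not try to destroy a bogus pointer. In the sketch only .pool_size, .nid and .dev are confirmed by the listing; the flags, order, dma_dir, offset and max_len values are typical choices for a driver that lets the pool own the DMA mapping.

    /* Sketch: one page_pool per RX ring, with assumed DMA-related params. */
    static int wx_alloc_page_pool_sketch(struct wx_ring *rx_ring)
    {
            struct page_pool_params pp_params = {
                    .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                    .order = 0,
                    .pool_size = rx_ring->size,
                    .nid = dev_to_node(rx_ring->dev),
                    .dev = rx_ring->dev,
                    .dma_dir = DMA_FROM_DEVICE,
                    .offset = 0,
                    .max_len = PAGE_SIZE,
            };
            int ret = 0;

            rx_ring->page_pool = page_pool_create(&pp_params);
            if (IS_ERR(rx_ring->page_pool)) {
                    ret = PTR_ERR(rx_ring->page_pool);
                    rx_ring->page_pool = NULL;      /* avoid a later double destroy */
            }

            return ret;
    }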
2393 static int wx_setup_rx_resources(struct wx_ring *rx_ring) in wx_setup_rx_resources() argument
2395 struct device *dev = rx_ring->dev; in wx_setup_rx_resources()
2400 size = sizeof(struct wx_rx_buffer) * rx_ring->count; in wx_setup_rx_resources()
2402 if (rx_ring->q_vector) in wx_setup_rx_resources()
2403 numa_node = rx_ring->q_vector->numa_node; in wx_setup_rx_resources()
2405 rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); in wx_setup_rx_resources()
2406 if (!rx_ring->rx_buffer_info) in wx_setup_rx_resources()
2407 rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL); in wx_setup_rx_resources()
2408 if (!rx_ring->rx_buffer_info) in wx_setup_rx_resources()
2412 rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc); in wx_setup_rx_resources()
2413 rx_ring->size = ALIGN(rx_ring->size, 4096); in wx_setup_rx_resources()
2416 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in wx_setup_rx_resources()
2417 &rx_ring->dma, GFP_KERNEL); in wx_setup_rx_resources()
2418 if (!rx_ring->desc) { in wx_setup_rx_resources()
2420 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in wx_setup_rx_resources()
2421 &rx_ring->dma, GFP_KERNEL); in wx_setup_rx_resources()
2424 if (!rx_ring->desc) in wx_setup_rx_resources()
2427 rx_ring->next_to_clean = 0; in wx_setup_rx_resources()
2428 rx_ring->next_to_use = 0; in wx_setup_rx_resources()
2430 ret = wx_alloc_page_pool(rx_ring); in wx_setup_rx_resources()
2432 dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret); in wx_setup_rx_resources()
2439 dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma); in wx_setup_rx_resources()
2441 kvfree(rx_ring->rx_buffer_info); in wx_setup_rx_resources()
2442 rx_ring->rx_buffer_info = NULL; in wx_setup_rx_resources()
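wx_setup_rx_resources() allocates three things per ring: the rx_buffer_info array (kvmalloc_node on the q_vector's NUMA node, falling back to any node), the descriptor ring itself as coherent DMA memory rounded up to a 4 KiB multiple, and the page pool; each failure unwinds whatever was already allocated. The sketch below collapses the allocation retry on line 2420 and the exact error labels, but keeps the sequence.

    /* Sketch of per-ring setup: NUMA-local metadata, page-aligned
     * coherent descriptor memory, then the page pool.
     */
    static int wx_setup_rx_resources_sketch(struct wx_ring *rx_ring)
    {
            struct device *dev = rx_ring->dev;
            int numa_node = NUMA_NO_NODE;
            size_t size;
            int ret;

            size = sizeof(struct wx_rx_buffer) * rx_ring->count;
            if (rx_ring->q_vector)
                    numa_node = rx_ring->q_vector->numa_node;

            rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
            if (!rx_ring->rx_buffer_info)
                    rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
            if (!rx_ring->rx_buffer_info)
                    return -ENOMEM;

            /* round the descriptor area up to a 4 KiB multiple */
            rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
            rx_ring->size = ALIGN(rx_ring->size, 4096);

            rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                               &rx_ring->dma, GFP_KERNEL);
            if (!rx_ring->desc) {
                    ret = -ENOMEM;
                    goto err_free_buffer_info;
            }

            rx_ring->next_to_clean = 0;
            rx_ring->next_to_use = 0;

            ret = wx_alloc_page_pool(rx_ring);
            if (ret < 0) {
                    dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
                    goto err_free_desc;
            }

            return 0;

    err_free_desc:
            dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma);
            rx_ring->desc = NULL;
    err_free_buffer_info:
            kvfree(rx_ring->rx_buffer_info);
            rx_ring->rx_buffer_info = NULL;
            return ret;
    }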
2462 err = wx_setup_rx_resources(wx->rx_ring[i]); in wx_setup_all_rx_resources()
2474 wx_free_rx_resources(wx->rx_ring[i]); in wx_setup_all_rx_resources()
2603 struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); in wx_get_stats64()
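wx_get_stats64() reads per-ring counters from a context that can race with the NAPI writer, so each ring pointer is loaded with READ_ONCE() (line 2603, since rings may be torn down concurrently) and the packets/bytes pair should be sampled inside a u64_stats_fetch_begin()/retry() loop so the two values stay consistent on 32-bit hosts, matching the writer shown at lines 682-685. A sketch of that reader side:

    /* Sketch of the reader side of the syncp-protected ring counters. */
    static void wx_fetch_ring_stats_sketch(struct wx_ring *ring,
                                           u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&ring->syncp);
                    *packets = ring->stats.packets;
                    *bytes = ring->stats.bytes;
            } while (u64_stats_fetch_retry(&ring->syncp, start));
    }

A caller would load each ring as on line 2603, skip NULL rings, and accumulate the fetched pair into the rtnl_link_stats64 totals.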