// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
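/* For example, loading the module with rx_refill_threshold=90 makes the
 * fast-fill path in efx_fast_push_rx_descriptors() start refilling once the
 * fill level drops below 90% of max_fill (clamped to max_trigger); with the
 * default of 0, the trigger falls back to max_trigger.  See
 * efx_init_rx_queue() below for the exact computation.
 */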

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
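/* A worked example (illustrative values): with an IOMMU present and two
 * buffers per page, efx_init_rx_recycle_ring() computes
 * roundup_pow_of_two(4096 / 2) = 2048 ring slots; without an IOMMU, and
 * assuming EFX_RX_PREFERRED_BATCH is 8, it computes
 * roundup_pow_of_two(16 / 2) = 8.  Rounding to a power of two lets the ring
 * be indexed with "ptr & page_ptr_mask" instead of a modulo.
 */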

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
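/* The "at least 1" likely keeps the write pointer from ever catching up with
 * the read pointer, which would make a completely full ring indistinguishable
 * from a completely empty one; the extra EFX_RX_MAX_FRAGS slots leave room
 * for one maximally-fragmented packet that may already be in flight.
 */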

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
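/* A sketch of the recycle-ring protocol implemented above and in
 * efx_recycle_rx_page() below: the consumer (efx_reuse_page, called from
 * efx_init_rx_buffers() while refilling descriptors) reads at page_remove,
 * and the producer (efx_recycle_rx_page, called when a received buffer is
 * finished with) writes at page_add.  A page is only reused if its refcount
 * is 1, i.e. no skb still holds a fragment reference to it; otherwise it is
 * unmapped and released here, and a fresh page is allocated instead.
 */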

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}
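/* For instance (illustrative numbers only): a requested rxq_entries of 1000
 * rounds up to 1024 descriptors, so ptr_mask becomes 0x3ff and buffer indices
 * are computed as "count & ptr_mask" throughout this file.
 */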

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
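	/* Starting page_add a full lap ahead of page_remove (rather than at
	 * zero) appears intended to keep the two pointers unequal while the
	 * ring is still full of NULL slots, so the clamp in efx_reuse_page()
	 * lets page_remove advance; both pointers index the ring through
	 * page_ptr_mask, so the offset does not change which slot is used.
	 */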
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}
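/* Slow-fill sketch: if efx_init_rx_buffers() fails (e.g. an atomic page
 * allocation under memory pressure), efx_fast_push_rx_descriptors() arms
 * this 10ms timer; when it fires, efx_rx_slow_fill() posts a fill event so
 * the NAPI handler runs again and retries the refill, scheduling the timer
 * again on failure until the ring is no longer starved.
 */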

/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   XDP_PACKET_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      XDP_PACKET_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
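/* The resulting page layout, as populated above (buffers per page is
 * determined by efx_rx_config_page_split() below):
 *
 *   +-------------------+----------+---------+----------+---------+--
 *   | efx_rx_page_state | headroom | buffer0 | headroom | buffer1 | ...
 *   +-------------------+----------+---------+----------+---------+--
 *
 * where each headroom is rx_ip_align + XDP_PACKET_HEADROOM and each buffer
 * slot advances by rx_page_buf_step bytes.
 */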

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      XDP_PACKET_HEADROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
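/* Rough example (assumed, not measured, values): with 4 KiB pages, order-0
 * allocations and a rx_page_buf_step that works out to 1920 bytes after
 * alignment, this yields rx_bufs_per_page = 2, rx_buffer_truesize = 2048
 * and, with EFX_RX_PREFERRED_BATCH of 8, rx_pages_per_batch = 4.
 */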

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
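/* Refill arithmetic sketch (illustrative numbers): with batch_size =
 * 4 pages * 2 buffers = 8 and space = 100 descriptors, the loop above runs
 * efx_init_rx_buffers() twelve times (12 * 8 = 96 <= 100) before stopping,
 * then tells the NIC about all the new descriptors in a single
 * efx_nic_notify_rx_desc() call rather than once per batch.
 */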

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}
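/* Note on ownership: clearing rx_buf->page above hands each page reference
 * over to the skb's fragment list, so these buffers are not recycled or
 * freed by this path; napi_gro_frags() then consumes the skb obtained from
 * napi_get_frags().  skb->data_len equals skb->len here because every byte
 * of the packet lives in page fragments rather than the linear area.
 */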