// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
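/* With an IOMMU present, mapping and unmapping pages for DMA is relatively
 * expensive, hence the much larger recycle ring in that case.
 */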
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

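/* Discard a received packet: return its pages to the recycle ring where
 * possible, then drop the buffer references.
 */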
void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

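	/* Round the ring size up to a power of two so that indices can be
	 * wrapped with page_ptr_mask.
	 */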
	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index, 0);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

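/* Arm the slow-fill timer; when it expires (10ms later), efx_rx_slow_fill()
 * generates a fill event so that NAPI refills the queue.
 */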
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Returns a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

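		/* The struct efx_rx_page_state header occupies the start of
		 * the page; RX buffers are carved out of the space after it.
		 */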
		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

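/* Work out how RX buffers are laid out within each page: the per-buffer
 * stride (data length plus IP alignment and XDP head/tailroom, aligned to
 * EFX_RX_BUF_ALIGNMENT), the number of buffers per page, the truesize
 * charged for each buffer, and the number of pages per refill batch.
 */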
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH &&
	    efx_rx_buf_hash_valid(efx, eh))
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

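	/* Attach each buffer's page fragment to the skb; ownership of the
	 * page reference passes to the skb.
	 */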
	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap.  If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

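/* Hash over the same outer_vid..end-of-spec region that
 * efx_filter_spec_equal() compares with memcmp() above.
 */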
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date.  Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed.  Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

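/* Map a filter spec to its bucket in the ARFS hash table, or return NULL if
 * the table has not been allocated.  The caller must hold rps_hash_lock.
 */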
static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry.  We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

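	/* Build an exact 5-tuple match (ether type, IP protocol, local and
	 * remote host/port) from the dissected flow keys.
	 */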
	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

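/* Scan up to @quota entries of this channel's flow ID table, asking the
 * driver to expire any installed ARFS filter that is no longer in use.
 * Returns false if the scan could not run because rps_mutex was contended.
 */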
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */