// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	/* The recycle ring may not exist if its allocation failed. */
	if (unlikely(!rx_queue->page_ring))
		return NULL;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	if (unlikely(!rx_queue->page_ring))
		return;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

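/* Allocate the software buffer table and the NIC descriptor ring for an
 * RX queue.  The ring size is the configured rxq_entries rounded up to a
 * power of two, so that ptr_mask can be used to wrap ring indices.
 */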
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

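/* Initialise an RX queue before use: reset the fill counters, create the
 * page recycle ring, derive the refill trigger level (optionally scaled by
 * the rx_refill_threshold module parameter), register XDP RX queue
 * information and set up the hardware descriptor ring.
 */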
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

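/* Shut down an RX queue: stop the slow-fill timer, release any buffers
 * still held on the ring, free the page recycle ring and unregister the
 * XDP RX queue information.
 */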
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

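/* Drop the page references held by a run of num_bufs consecutive RX
 * buffers.
 */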
void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create a batch of page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages (efx->rx_pages_per_batch of them), maps
 * them for DMA, and populates a struct efx_rx_buffer for each buffer they
 * provide.  Returns 0 on success or a negative error code.  If a single
 * page can be used for multiple buffers, then the page will either be
 * inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

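/* Work out how RX buffers are laid out within each DMA-mapped page: the
 * per-buffer step, the number of buffers per page, the truesize charged
 * to each buffer, and the number of pages allocated per refill batch.
 */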
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap.  If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

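/* Compare two filter specifications.  The match flags and the RX/TX
 * direction flags must be equal; all match fields (everything from
 * outer_vid to the end of the structure) are then compared byte-for-byte.
 */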
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

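/* Hash the match fields of a filter specification (everything from
 * outer_vid to the end of the structure), used to index the ARFS rule
 * hash table.
 */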
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date.  Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed.  Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry.  We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

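/* Probe the hardware filter table and, when accelerated RFS is enabled,
 * allocate the per-channel RPS flow ID arrays used to track ARFS filters.
 */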
int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	init_rwsem(&efx->filter_sem);
	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

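/* Free the per-channel RPS flow ID arrays (if any) and remove the hardware
 * filter table.
 */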
void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
	}
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

#ifdef CONFIG_RFS_ACCEL

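/* Work item that carries out the filter insertion for an ARFS request
 * queued by efx_filter_rfs(), then records the resulting filter ID in the
 * ARFS rule hash table and the channel's RPS flow ID array.
 */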
static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

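/* ndo_rx_flow_steer() handler: dissect the flow, build a filter
 * specification for it, record it in the ARFS rule hash table (when one
 * exists) and hand the actual insertion off to efx_filter_rfs_work().
 */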
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

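/* Scan up to @quota in-use entries of the channel's RPS flow ID array,
 * starting from the last expiry position, and ask the NIC type's
 * filter_rfs_expire_one() to expire filters that are no longer in use.
 * Returns false if the scan could not run because rps_mutex was contended.
 */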
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */