1 /*
2  * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 
34 #include <linux/bpf.h>
35 #include <linux/bpf_trace.h>
36 #include <linux/mlx4/cq.h>
37 #include <linux/slab.h>
38 #include <linux/mlx4/qp.h>
39 #include <linux/skbuff.h>
40 #include <linux/rculist.h>
41 #include <linux/if_ether.h>
42 #include <linux/if_vlan.h>
43 #include <linux/vmalloc.h>
44 #include <linux/irq.h>
45 
46 #include <net/ip.h>
47 #if IS_ENABLED(CONFIG_IPV6)
48 #include <net/ip6_checksum.h>
49 #endif
50 
51 #include "mlx4_en.h"
52 
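/* Allocate a page for an RX fragment and DMA-map it for the device.
 * On success the page, its DMA address and the configured RX headroom
 * are recorded in @frag.
 */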
53 static int mlx4_alloc_page(struct mlx4_en_priv *priv,
54 			   struct mlx4_en_rx_alloc *frag,
55 			   gfp_t gfp)
56 {
57 	struct page *page;
58 	dma_addr_t dma;
59 
60 	page = alloc_page(gfp);
61 	if (unlikely(!page))
62 		return -ENOMEM;
63 	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
64 	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
65 		__free_page(page);
66 		return -ENOMEM;
67 	}
68 	frag->page = page;
69 	frag->dma = dma;
70 	frag->page_offset = priv->rx_headroom;
71 	return 0;
72 }
73 
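/* Attach pages to every fragment of an RX descriptor, allocating new
 * pages only for fragments that do not already have one, and write the
 * resulting DMA addresses into the hardware descriptor.
 */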
74 static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
75 			       struct mlx4_en_rx_ring *ring,
76 			       struct mlx4_en_rx_desc *rx_desc,
77 			       struct mlx4_en_rx_alloc *frags,
78 			       gfp_t gfp)
79 {
80 	int i;
81 
82 	for (i = 0; i < priv->num_frags; i++, frags++) {
83 		if (!frags->page) {
84 			if (mlx4_alloc_page(priv, frags, gfp))
85 				return -ENOMEM;
86 			ring->rx_alloc_pages++;
87 		}
88 		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
89 						    frags->page_offset);
90 	}
91 	return 0;
92 }
93 
94 static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
95 			      struct mlx4_en_rx_alloc *frag)
96 {
97 	if (frag->page) {
98 		dma_unmap_page(priv->ddev, frag->dma,
99 			       PAGE_SIZE, priv->dma_dir);
100 		__free_page(frag->page);
101 	}
102 	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage later in frag->page.
104 	 */
105 	memset(frag, 0, sizeof(*frag));
106 }
107 
108 static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
109 				 struct mlx4_en_rx_ring *ring, int index)
110 {
111 	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
112 	int possible_frags;
113 	int i;
114 
115 	/* Set size and memtype fields */
116 	for (i = 0; i < priv->num_frags; i++) {
117 		rx_desc->data[i].byte_count =
118 			cpu_to_be32(priv->frag_info[i].frag_size);
119 		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
120 	}
121 
	/* If the number of used fragments does not fill up the ring stride,
	 * the remaining (unused) fragments must be padded with a null
	 * address/size and a special memory key.
	 */
125 	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
126 	for (i = priv->num_frags; i < possible_frags; i++) {
127 		rx_desc->data[i].byte_count = 0;
128 		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
129 		rx_desc->data[i].addr = 0;
130 	}
131 }
132 
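/* Post (or re-post) the RX descriptor at @index.  If the ring's page
 * cache holds recycled (XDP) pages, reuse one for the single-fragment
 * descriptor; otherwise fall back to the regular per-fragment
 * allocation path.
 */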
133 static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
134 				   struct mlx4_en_rx_ring *ring, int index,
135 				   gfp_t gfp)
136 {
137 	struct mlx4_en_rx_desc *rx_desc = ring->buf +
138 		(index << ring->log_stride);
139 	struct mlx4_en_rx_alloc *frags = ring->rx_info +
140 					(index << priv->log_rx_info);
141 	if (likely(ring->page_cache.index > 0)) {
142 		/* XDP uses a single page per frame */
143 		if (!frags->page) {
144 			ring->page_cache.index--;
145 			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
146 			frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
147 		}
148 		frags->page_offset = XDP_PACKET_HEADROOM;
149 		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
150 						    XDP_PACKET_HEADROOM);
151 		return 0;
152 	}
153 
154 	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
155 }
156 
157 static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
158 {
159 	return ring->prod == ring->cons;
160 }
161 
162 static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
163 {
164 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
165 }
166 
167 /* slow path */
168 static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
169 				 struct mlx4_en_rx_ring *ring,
170 				 int index)
171 {
172 	struct mlx4_en_rx_alloc *frags;
173 	int nr;
174 
175 	frags = ring->rx_info + (index << priv->log_rx_info);
176 	for (nr = 0; nr < priv->num_frags; nr++) {
177 		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
178 		mlx4_en_free_frag(priv, frags + nr);
179 	}
180 }
181 
182 /* Function not in fast-path */
183 static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
184 {
185 	struct mlx4_en_rx_ring *ring;
186 	int ring_ind;
187 	int buf_ind;
188 	int new_size;
189 
190 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
191 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
192 			ring = priv->rx_ring[ring_ind];
193 
194 			if (mlx4_en_prepare_rx_desc(priv, ring,
195 						    ring->actual_size,
196 						    GFP_KERNEL)) {
197 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
198 					en_err(priv, "Failed to allocate enough rx buffers\n");
199 					return -ENOMEM;
200 				} else {
201 					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, reducing ring size to %d\n",
203 						ring->actual_size, new_size);
204 					goto reduce_rings;
205 				}
206 			}
207 			ring->actual_size++;
208 			ring->prod++;
209 		}
210 	}
211 	return 0;
212 
213 reduce_rings:
214 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
215 		ring = priv->rx_ring[ring_ind];
216 		while (ring->actual_size > new_size) {
217 			ring->actual_size--;
218 			ring->prod--;
219 			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
220 		}
221 	}
222 
223 	return 0;
224 }
225 
226 static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
227 				struct mlx4_en_rx_ring *ring)
228 {
229 	int index;
230 
231 	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
232 	       ring->cons, ring->prod);
233 
234 	/* Unmap and free Rx buffers */
235 	for (index = 0; index < ring->size; index++) {
236 		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
237 		mlx4_en_free_rx_desc(priv, ring, index);
238 	}
239 	ring->cons = 0;
240 	ring->prod = 0;
241 }
242 
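/* Pick the default number of RX rings for each Ethernet port: the EQs
 * available on the port (clamped between MIN_RX_RINGS and DEF_RX_RINGS),
 * limited by the number of online CPUs and rounded down to a power of
 * two; MIN_RX_RINGS is used under the low-memory profile.
 */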
243 void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
244 {
245 	int i;
246 	int num_of_eqs;
247 	int num_rx_rings;
248 	struct mlx4_dev *dev = mdev->dev;
249 
250 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
251 		num_of_eqs = max_t(int, MIN_RX_RINGS,
252 				   min_t(int,
253 					 mlx4_get_eqs_per_port(mdev->dev, i),
254 					 DEF_RX_RINGS));
255 
256 		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
257 			min_t(int, num_of_eqs, num_online_cpus());
258 		mdev->profile.prof[i].rx_ring_num =
259 			rounddown_pow_of_two(num_rx_rings);
260 	}
261 }
262 
263 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
264 			   struct mlx4_en_rx_ring **pring,
265 			   u32 size, u16 stride, int node, int queue_index)
266 {
267 	struct mlx4_en_dev *mdev = priv->mdev;
268 	struct mlx4_en_rx_ring *ring;
269 	int err = -ENOMEM;
270 	int tmp;
271 
272 	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
273 	if (!ring) {
274 		en_err(priv, "Failed to allocate RX ring structure\n");
275 		return -ENOMEM;
276 	}
277 
278 	ring->prod = 0;
279 	ring->cons = 0;
280 	ring->size = size;
281 	ring->size_mask = size - 1;
282 	ring->stride = stride;
283 	ring->log_stride = ffs(ring->stride) - 1;
284 	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
285 
286 	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
287 		goto err_ring;
288 
289 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
290 					sizeof(struct mlx4_en_rx_alloc));
291 	ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
292 	if (!ring->rx_info) {
293 		err = -ENOMEM;
294 		goto err_xdp_info;
295 	}
296 
297 	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
298 		 ring->rx_info, tmp);
299 
300 	/* Allocate HW buffers on provided NUMA node */
301 	set_dev_node(&mdev->dev->persist->pdev->dev, node);
302 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
303 	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
304 	if (err)
305 		goto err_info;
306 
307 	ring->buf = ring->wqres.buf.direct.buf;
308 
309 	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
310 
311 	*pring = ring;
312 	return 0;
313 
314 err_info:
315 	kvfree(ring->rx_info);
316 	ring->rx_info = NULL;
317 err_xdp_info:
318 	xdp_rxq_info_unreg(&ring->xdp_rxq);
319 err_ring:
320 	kfree(ring);
321 	*pring = NULL;
322 
323 	return err;
324 }
325 
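/* Reset all RX rings, write the initial descriptors, fill them with
 * buffers and ring the doorbells.  On failure, buffers that were
 * already posted are freed again.
 */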
326 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
327 {
328 	struct mlx4_en_rx_ring *ring;
329 	int i;
330 	int ring_ind;
331 	int err;
332 	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
333 					DS_SIZE * priv->num_frags);
334 
335 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
336 		ring = priv->rx_ring[ring_ind];
337 
338 		ring->prod = 0;
339 		ring->cons = 0;
340 		ring->actual_size = 0;
341 		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
342 
343 		ring->stride = stride;
344 		if (ring->stride <= TXBB_SIZE) {
345 			/* Stamp first unused send wqe */
346 			__be32 *ptr = (__be32 *)ring->buf;
347 			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
348 			*ptr = stamp;
349 			/* Move pointer to start of rx section */
350 			ring->buf += TXBB_SIZE;
351 		}
352 
353 		ring->log_stride = ffs(ring->stride) - 1;
354 		ring->buf_size = ring->size * ring->stride;
355 
356 		memset(ring->buf, 0, ring->buf_size);
357 		mlx4_en_update_rx_prod_db(ring);
358 
359 		/* Initialize all descriptors */
360 		for (i = 0; i < ring->size; i++)
361 			mlx4_en_init_rx_desc(priv, ring, i);
362 	}
363 	err = mlx4_en_fill_rx_buffers(priv);
364 	if (err)
365 		goto err_buffers;
366 
367 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
368 		ring = priv->rx_ring[ring_ind];
369 
370 		ring->size_mask = ring->actual_size - 1;
371 		mlx4_en_update_rx_prod_db(ring);
372 	}
373 
374 	return 0;
375 
376 err_buffers:
377 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
378 		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
379 
380 	ring_ind = priv->rx_ring_num - 1;
381 	while (ring_ind >= 0) {
382 		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
383 			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
384 		ring_ind--;
385 	}
386 	return err;
387 }
388 
389 /* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_rx_cq), which tries to allocate
 * all the missing RX buffers (via mlx4_en_refill_rx_buffers).
392  */
393 void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
394 {
395 	int ring;
396 
397 	if (!priv->port_up)
398 		return;
399 
400 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
401 		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
402 			local_bh_disable();
403 			napi_reschedule(&priv->rx_cq[ring]->napi);
404 			local_bh_enable();
405 		}
406 	}
407 }
408 
409 /* When the rx ring is running in page-per-packet mode, a released frame can go
410  * directly into a small cache, to avoid unmapping or touching the page
411  * allocator. In bpf prog performance scenarios, buffers are either forwarded
412  * or dropped, never converted to skbs, so every page can come directly from
413  * this cache when it is sized to be a multiple of the napi budget.
414  */
415 bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
416 			struct mlx4_en_rx_alloc *frame)
417 {
418 	struct mlx4_en_page_cache *cache = &ring->page_cache;
419 
420 	if (cache->index >= MLX4_EN_CACHE_SIZE)
421 		return false;
422 
423 	cache->buf[cache->index].page = frame->page;
424 	cache->buf[cache->index].dma = frame->dma;
425 	cache->index++;
426 	return true;
427 }
428 
429 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
430 			     struct mlx4_en_rx_ring **pring,
431 			     u32 size, u16 stride)
432 {
433 	struct mlx4_en_dev *mdev = priv->mdev;
434 	struct mlx4_en_rx_ring *ring = *pring;
435 	struct bpf_prog *old_prog;
436 
437 	old_prog = rcu_dereference_protected(
438 					ring->xdp_prog,
439 					lockdep_is_held(&mdev->state_lock));
440 	if (old_prog)
441 		bpf_prog_put(old_prog);
442 	xdp_rxq_info_unreg(&ring->xdp_rxq);
443 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
444 	kvfree(ring->rx_info);
445 	ring->rx_info = NULL;
446 	kfree(ring);
447 	*pring = NULL;
448 }
449 
450 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
451 				struct mlx4_en_rx_ring *ring)
452 {
453 	int i;
454 
455 	for (i = 0; i < ring->page_cache.index; i++) {
456 		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
457 			       PAGE_SIZE, priv->dma_dir);
458 		put_page(ring->page_cache.buf[i].page);
459 	}
460 	ring->page_cache.index = 0;
461 	mlx4_en_free_rx_buf(priv, ring);
462 	if (ring->stride <= TXBB_SIZE)
463 		ring->buf -= TXBB_SIZE;
464 }
465 
466 
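/* Fill @skb's paged frags from the received fragments, syncing each one
 * for CPU access.  A page that still has room for another packet is kept
 * by the ring with an extra reference; otherwise it is unmapped and its
 * ownership passes to the skb.  Returns the number of frags used, or 0
 * on failure.
 */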
467 static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
468 				    struct mlx4_en_rx_alloc *frags,
469 				    struct sk_buff *skb,
470 				    int length)
471 {
472 	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
473 	unsigned int truesize = 0;
474 	bool release = true;
475 	int nr, frag_size;
476 	struct page *page;
477 	dma_addr_t dma;
478 
479 	/* Collect used fragments while replacing them in the HW descriptors */
480 	for (nr = 0;; frags++) {
481 		frag_size = min_t(int, length, frag_info->frag_size);
482 
483 		page = frags->page;
484 		if (unlikely(!page))
485 			goto fail;
486 
487 		dma = frags->dma;
488 		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
489 					      frag_size, priv->dma_dir);
490 
491 		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
492 				     frag_size);
493 
494 		truesize += frag_info->frag_stride;
495 		if (frag_info->frag_stride == PAGE_SIZE / 2) {
496 			frags->page_offset ^= PAGE_SIZE / 2;
497 			release = page_count(page) != 1 ||
498 				  page_is_pfmemalloc(page) ||
499 				  page_to_nid(page) != numa_mem_id();
500 		} else if (!priv->rx_headroom) {
			/* rx_headroom for a non-XDP setup is always 0.
			 * When XDP is set, the condition above guarantees
			 * that the page is always released.
504 			 */
505 			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
506 
507 			frags->page_offset += sz_align;
508 			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
509 		}
510 		if (release) {
511 			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
512 			frags->page = NULL;
513 		} else {
514 			page_ref_inc(page);
515 		}
516 
517 		nr++;
518 		length -= frag_size;
519 		if (!length)
520 			break;
521 		frag_info++;
522 	}
523 	skb->truesize += truesize;
524 	return nr;
525 
526 fail:
527 	while (nr > 0) {
528 		nr--;
529 		__skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
530 	}
531 	return 0;
532 }
533 
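/* Self-test helper: mark loopback_ok if the frame carries the expected
 * incrementing byte pattern right after the Ethernet header.
 */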
534 static void validate_loopback(struct mlx4_en_priv *priv, void *va)
535 {
536 	const unsigned char *data = va + ETH_HLEN;
537 	int i;
538 
539 	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
540 		if (data[i] != (unsigned char)i)
541 			return;
542 	}
543 	/* Loopback found */
544 	priv->loopback_ok = 1;
545 }
546 
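/* Repost RX buffers for the descriptors consumed since the last refill.
 * Do nothing if fewer than 8 are missing; otherwise allocate in a batch
 * and ring the doorbell once at the end.
 */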
547 static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
548 				      struct mlx4_en_rx_ring *ring)
549 {
550 	u32 missing = ring->actual_size - (ring->prod - ring->cons);
551 
552 	/* Try to batch allocations, but not too much. */
553 	if (missing < 8)
554 		return;
555 	do {
556 		if (mlx4_en_prepare_rx_desc(priv, ring,
557 					    ring->prod & ring->size_mask,
558 					    GFP_ATOMIC | __GFP_MEMALLOC))
559 			break;
560 		ring->prod++;
561 	} while (likely(--missing));
562 
563 	mlx4_en_update_rx_prod_db(ring);
564 }
565 
/* When hardware doesn't strip the VLAN header, we need to calculate the
 * checksum over it and add it to the checksum reported by the hardware.
568  */
569 static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
570 					 struct vlan_hdr *vlanh)
571 {
572 	return csum_add(hw_checksum, *(__wsum *)vlanh);
573 }
574 
/* Although the stack expects a checksum that doesn't include the pseudo
 * header, the HW includes it. To address that, we subtract the pseudo
 * header checksum from the checksum value provided by the HW.
578  */
579 static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
580 			       struct iphdr *iph)
581 {
582 	__u16 length_for_csum = 0;
583 	__wsum csum_pseudo_header = 0;
584 	__u8 ipproto = iph->protocol;
585 
586 	if (unlikely(ipproto == IPPROTO_SCTP))
587 		return -1;
588 
589 	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
590 	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
591 						length_for_csum, ipproto, 0);
592 	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
593 	return 0;
594 }
595 
596 #if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, hw_checksum lacks 6 bytes of the IPv6 header:
 * the first 4 bytes (priority, version, flow_lbl)
 * and 2 additional bytes (nexthdr, hop_limit).
600  */
601 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
602 			       struct ipv6hdr *ipv6h)
603 {
604 	__u8 nexthdr = ipv6h->nexthdr;
605 	__wsum temp;
606 
607 	if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
608 		     nexthdr == IPPROTO_HOPOPTS ||
609 		     nexthdr == IPPROTO_SCTP))
610 		return -1;
611 
612 	/* priority, version, flow_lbl */
613 	temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
614 	/* nexthdr and hop_limit */
615 	skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
616 	return 0;
617 }
618 #endif
619 
620 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
621 
/* We reach this function only after checking that at least one of
 * the (IPv4 | IPv6) bits is set in cqe->status.
624  */
625 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
626 		      netdev_features_t dev_features)
627 {
628 	__wsum hw_checksum = 0;
629 	void *hdr;
630 
631 	/* CQE csum doesn't cover padding octets in short ethernet
632 	 * frames. And the pad field is appended prior to calculating
633 	 * and appending the FCS field.
634 	 *
	 * Detecting these padded frames requires verifying and parsing
636 	 * IP headers, so we simply force all those small frames to skip
637 	 * checksum complete.
638 	 */
639 	if (short_frame(skb->len))
640 		return -EINVAL;
641 
642 	hdr = (u8 *)va + sizeof(struct ethhdr);
643 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
644 
645 	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
646 	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
647 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
648 		hdr += sizeof(struct vlan_hdr);
649 	}
650 
651 #if IS_ENABLED(CONFIG_IPV6)
652 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
653 		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
654 #endif
655 	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
656 }
657 
658 #if IS_ENABLED(CONFIG_IPV6)
659 #define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
660 #else
661 #define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
662 #endif
663 
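/* Driver-private XDP buffer: wraps the generic xdp_buff with the CQE,
 * device and ring context needed by the XDP RX metadata kfuncs below.
 */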
664 struct mlx4_en_xdp_buff {
665 	struct xdp_buff xdp;
666 	struct mlx4_cqe *cqe;
667 	struct mlx4_en_dev *mdev;
668 	struct mlx4_en_rx_ring *ring;
669 	struct net_device *dev;
670 };
671 
672 int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
673 {
674 	struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
675 
676 	if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
677 		return -EOPNOTSUPP;
678 
679 	*timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
680 					  mlx4_en_get_cqe_ts(_ctx->cqe));
681 	return 0;
682 }
683 
684 int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
685 {
686 	struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
687 
688 	if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
689 		return -EOPNOTSUPP;
690 
691 	*hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
692 	return 0;
693 }
694 
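/* Poll the RX completion queue: for each completed CQE run the attached
 * XDP program (if any), otherwise build an skb and pass it up through
 * napi_gro_frags(); then advance the CQ consumer index and refill the
 * RX ring.  Returns the number of CQEs processed, at most @budget.
 */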
695 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
696 {
697 	struct mlx4_en_priv *priv = netdev_priv(dev);
698 	struct mlx4_en_xdp_buff mxbuf = {};
699 	int factor = priv->cqe_factor;
700 	struct mlx4_en_rx_ring *ring;
701 	struct bpf_prog *xdp_prog;
702 	int cq_ring = cq->ring;
703 	bool doorbell_pending;
704 	bool xdp_redir_flush;
705 	struct mlx4_cqe *cqe;
706 	int polled = 0;
707 	int index;
708 
709 	if (unlikely(!priv->port_up || budget <= 0))
710 		return 0;
711 
712 	ring = priv->rx_ring[cq_ring];
713 
714 	xdp_prog = rcu_dereference_bh(ring->xdp_prog);
715 	xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
716 	doorbell_pending = false;
717 	xdp_redir_flush = false;
718 
	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so the Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index'.
	 */
722 	index = cq->mcq.cons_index & ring->size_mask;
723 	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
724 
725 	/* Process all completed CQEs */
726 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
727 		    cq->mcq.cons_index & cq->size)) {
728 		struct mlx4_en_rx_alloc *frags;
729 		enum pkt_hash_types hash_type;
730 		struct sk_buff *skb;
731 		unsigned int length;
732 		int ip_summed;
733 		void *va;
734 		int nr;
735 
736 		frags = ring->rx_info + (index << priv->log_rx_info);
737 		va = page_address(frags[0].page) + frags[0].page_offset;
738 		net_prefetchw(va);
739 		/*
740 		 * make sure we read the CQE after we read the ownership bit
741 		 */
742 		dma_rmb();
743 
744 		/* Drop packet on bad receive or bad checksum */
745 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
746 						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
748 			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
749 			       ((struct mlx4_err_cqe *)cqe)->syndrome);
750 			goto next;
751 		}
752 		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
753 			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
754 			goto next;
755 		}
756 
		/* Check if we need to drop the packet if SRIOV is not enabled,
		 * we are not performing the selftest, or flb is disabled
759 		 */
760 		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
761 			const struct ethhdr *ethh = va;
762 			dma_addr_t dma;
			/* Get a pointer to the first fragment, since we don't
			 * have an skb yet, and cast it to an ethhdr struct
765 			 */
766 			dma = frags[0].dma + frags[0].page_offset;
767 			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
768 						DMA_FROM_DEVICE);
769 
770 			if (is_multicast_ether_addr(ethh->h_dest)) {
771 				struct mlx4_mac_entry *entry;
772 				struct hlist_head *bucket;
773 				unsigned int mac_hash;
774 
				/* Drop the packet, since HW looped it back */
776 				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
777 				bucket = &priv->mac_hash[mac_hash];
778 				hlist_for_each_entry_rcu_bh(entry, bucket, hlist) {
779 					if (ether_addr_equal_64bits(entry->mac,
780 								    ethh->h_source))
781 						goto next;
782 				}
783 			}
784 		}
785 
786 		if (unlikely(priv->validate_loopback)) {
787 			validate_loopback(priv, va);
788 			goto next;
789 		}
790 
791 		/*
792 		 * Packet is OK - process it.
793 		 */
794 		length = be32_to_cpu(cqe->byte_cnt);
795 		length -= ring->fcs_del;
796 
		/* A bpf program gets the first chance to drop the packet. It may
		 * read bytes but not past the end of the fragment.
799 		 */
800 		if (xdp_prog) {
801 			dma_addr_t dma;
802 			void *orig_data;
803 			u32 act;
804 
805 			dma = frags[0].dma + frags[0].page_offset;
806 			dma_sync_single_for_cpu(priv->ddev, dma,
807 						priv->frag_info[0].frag_size,
808 						DMA_FROM_DEVICE);
809 
810 			xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
811 					 frags[0].page_offset, length, true);
812 			orig_data = mxbuf.xdp.data;
813 			mxbuf.cqe = cqe;
814 			mxbuf.mdev = priv->mdev;
815 			mxbuf.ring = ring;
816 			mxbuf.dev = dev;
817 
818 			act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
819 
820 			length = mxbuf.xdp.data_end - mxbuf.xdp.data;
821 			if (mxbuf.xdp.data != orig_data) {
822 				frags[0].page_offset = mxbuf.xdp.data -
823 					mxbuf.xdp.data_hard_start;
824 				va = mxbuf.xdp.data;
825 			}
826 
827 			switch (act) {
828 			case XDP_PASS:
829 				break;
830 			case XDP_REDIRECT:
831 				if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
832 					ring->xdp_redirect++;
833 					xdp_redir_flush = true;
834 					frags[0].page = NULL;
835 					goto next;
836 				}
837 				ring->xdp_redirect_fail++;
838 				trace_xdp_exception(dev, xdp_prog, act);
839 				goto xdp_drop_no_cnt;
840 			case XDP_TX:
841 				if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
842 							length, cq_ring,
843 							&doorbell_pending))) {
844 					frags[0].page = NULL;
845 					goto next;
846 				}
847 				trace_xdp_exception(dev, xdp_prog, act);
848 				goto xdp_drop_no_cnt; /* Drop on xmit failure */
849 			default:
850 				bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
851 				fallthrough;
852 			case XDP_ABORTED:
853 				trace_xdp_exception(dev, xdp_prog, act);
854 				fallthrough;
855 			case XDP_DROP:
856 				ring->xdp_drop++;
857 xdp_drop_no_cnt:
858 				goto next;
859 			}
860 		}
861 
862 		ring->bytes += length;
863 		ring->packets++;
864 
865 		skb = napi_get_frags(&cq->napi);
866 		if (unlikely(!skb))
867 			goto next;
868 
869 		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
870 			u64 timestamp = mlx4_en_get_cqe_ts(cqe);
871 
872 			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
873 					       timestamp);
874 		}
875 		skb_record_rx_queue(skb, cq_ring);
876 
877 		if (likely(dev->features & NETIF_F_RXCSUM)) {
			/* TODO: For IP non-TCP/UDP packets, when checksum complete
			 * is not an option (not supported or any other reason), we
			 * can actually check the cqe IPOK status bit and report
			 * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
882 			 */
883 			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
884 						       MLX4_CQE_STATUS_UDP)) &&
885 			    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
886 			    cqe->checksum == cpu_to_be16(0xffff)) {
887 				bool l2_tunnel;
888 
889 				l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
890 					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
891 				ip_summed = CHECKSUM_UNNECESSARY;
892 				hash_type = PKT_HASH_TYPE_L4;
893 				if (l2_tunnel)
894 					skb->csum_level = 1;
895 				ring->csum_ok++;
896 			} else {
897 				if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
898 				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
899 					goto csum_none;
900 				if (check_csum(cqe, skb, va, dev->features))
901 					goto csum_none;
902 				ip_summed = CHECKSUM_COMPLETE;
903 				hash_type = PKT_HASH_TYPE_L3;
904 				ring->csum_complete++;
905 			}
906 		} else {
907 csum_none:
908 			ip_summed = CHECKSUM_NONE;
909 			hash_type = PKT_HASH_TYPE_L3;
910 			ring->csum_none++;
911 		}
912 		skb->ip_summed = ip_summed;
913 		if (dev->features & NETIF_F_RXHASH)
914 			skb_set_hash(skb,
915 				     be32_to_cpu(cqe->immed_rss_invalid),
916 				     hash_type);
917 
918 		if ((cqe->vlan_my_qpn &
919 		     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
920 		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
921 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
922 					       be16_to_cpu(cqe->sl_vid));
923 		else if ((cqe->vlan_my_qpn &
924 			  cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
925 			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
926 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
927 					       be16_to_cpu(cqe->sl_vid));
928 
929 		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
930 		if (likely(nr)) {
931 			skb_shinfo(skb)->nr_frags = nr;
932 			skb->len = length;
933 			skb->data_len = length;
934 			napi_gro_frags(&cq->napi);
935 		} else {
936 			__vlan_hwaccel_clear_tag(skb);
937 			skb_clear_hash(skb);
938 		}
939 next:
940 		++cq->mcq.cons_index;
941 		index = (cq->mcq.cons_index) & ring->size_mask;
942 		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
943 		if (unlikely(++polled == budget))
944 			break;
945 	}
946 
947 	if (xdp_redir_flush)
948 		xdp_do_flush();
949 
950 	if (likely(polled)) {
951 		if (doorbell_pending) {
952 			priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
953 			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
954 		}
955 
956 		mlx4_cq_set_ci(&cq->mcq);
957 		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
958 		ring->cons = cq->mcq.cons_index;
959 	}
960 
961 	mlx4_en_refill_rx_buffers(priv, ring);
962 
963 	return polled;
964 }
965 
966 
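/* RX completion interrupt handler: schedule NAPI while the port is up,
 * otherwise just re-arm the CQ.
 */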
967 void mlx4_en_rx_irq(struct mlx4_cq *mcq)
968 {
969 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
970 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
971 
972 	if (likely(priv->port_up))
973 		napi_schedule_irqoff(&cq->napi);
974 	else
975 		mlx4_en_arm_cq(priv, cq);
976 }
977 
978 /* Rx CQ polling - called by NAPI */
979 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
980 {
981 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
982 	struct net_device *dev = cq->dev;
983 	struct mlx4_en_priv *priv = netdev_priv(dev);
984 	struct mlx4_en_cq *xdp_tx_cq = NULL;
985 	bool clean_complete = true;
986 	int done;
987 
988 	if (!budget)
989 		return 0;
990 
991 	if (priv->tx_ring_num[TX_XDP]) {
992 		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
993 		if (xdp_tx_cq->xdp_busy) {
994 			clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
995 							       budget) < budget;
996 			xdp_tx_cq->xdp_busy = !clean_complete;
997 		}
998 	}
999 
1000 	done = mlx4_en_process_rx_cq(dev, cq, budget);
1001 
1002 	/* If we used up all the quota - we're probably not done yet... */
1003 	if (done == budget || !clean_complete) {
1004 		int cpu_curr;
1005 
1006 		/* in case we got here because of !clean_complete */
1007 		done = budget;
1008 
1009 		cpu_curr = smp_processor_id();
1010 
1011 		if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
1012 			return budget;
1013 
		/* The current CPU does not match the IRQ affinity
		 * (smp_irq_affinity) - the affinity probably changed. We need
		 * to stop this NAPI poll and restart it on the right CPU.
		 * Try to avoid returning too small a value (like 0), so as
		 * not to fool net_rx_action() and its netdev_budget
1019 		 */
1020 		if (done)
1021 			done--;
1022 	}
1023 	/* Done for now */
1024 	if (likely(napi_complete_done(napi, done)))
1025 		mlx4_en_arm_cq(priv, cq);
1026 	return done;
1027 }
1028 
1029 void mlx4_en_calc_rx_buf(struct net_device *dev)
1030 {
1031 	struct mlx4_en_priv *priv = netdev_priv(dev);
1032 	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
1033 	int i = 0;
1034 
1035 	/* bpf requires buffers to be set up as 1 packet per page.
1036 	 * This only works when num_frags == 1.
1037 	 */
1038 	if (priv->tx_ring_num[TX_XDP]) {
1039 		priv->frag_info[0].frag_size = eff_mtu;
1040 		/* This will gain efficient xdp frame recycling at the
1041 		 * expense of more costly truesize accounting
1042 		 */
1043 		priv->frag_info[0].frag_stride = PAGE_SIZE;
1044 		priv->dma_dir = DMA_BIDIRECTIONAL;
1045 		priv->rx_headroom = XDP_PACKET_HEADROOM;
1046 		i = 1;
1047 	} else {
1048 		int frag_size_max = 2048, buf_size = 0;
1049 
		/* should not happen, right? */
1051 		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
1052 			frag_size_max = PAGE_SIZE;
1053 
1054 		while (buf_size < eff_mtu) {
1055 			int frag_stride, frag_size = eff_mtu - buf_size;
1056 			int pad, nb;
1057 
1058 			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
1059 				frag_size = min(frag_size, frag_size_max);
1060 
1061 			priv->frag_info[i].frag_size = frag_size;
1062 			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
			/* We can only pack two 1536-byte frames in one 4K page.
			 * Therefore, each frame consumes more bytes (truesize)
1065 			 */
1066 			nb = PAGE_SIZE / frag_stride;
1067 			pad = (PAGE_SIZE - nb * frag_stride) / nb;
1068 			pad &= ~(SMP_CACHE_BYTES - 1);
1069 			priv->frag_info[i].frag_stride = frag_stride + pad;
1070 
1071 			buf_size += frag_size;
1072 			i++;
1073 		}
1074 		priv->dma_dir = DMA_FROM_DEVICE;
1075 		priv->rx_headroom = 0;
1076 	}
1077 
1078 	priv->num_frags = i;
1079 	priv->rx_skb_size = eff_mtu;
1080 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
1081 
1082 	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
1083 	       eff_mtu, priv->num_frags);
1084 	for (i = 0; i < priv->num_frags; i++) {
1085 		en_dbg(DRV,
1086 		       priv,
1087 		       "  frag:%d - size:%d stride:%d\n",
1088 		       i,
1089 		       priv->frag_info[i].frag_size,
1090 		       priv->frag_info[i].frag_stride);
1091 	}
1092 }
1093 
1094 /* RSS related functions */
1095 
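/* Allocate one RX QP for @ring, fill its context (pointing at the
 * ring's CQ and doorbell record), configure FCS handling, and move the
 * QP to the ready state.
 */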
1096 static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
1097 				 struct mlx4_en_rx_ring *ring,
1098 				 enum mlx4_qp_state *state,
1099 				 struct mlx4_qp *qp)
1100 {
1101 	struct mlx4_en_dev *mdev = priv->mdev;
1102 	struct mlx4_qp_context *context;
1103 	int err = 0;
1104 
1105 	context = kzalloc(sizeof(*context), GFP_KERNEL);
1106 	if (!context)
1107 		return -ENOMEM;
1108 
1109 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
1110 	if (err) {
1111 		en_err(priv, "Failed to allocate qp #%x\n", qpn);
1112 		goto out;
1113 	}
1114 	qp->event = mlx4_en_sqp_event;
1115 
1116 	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
1117 				qpn, ring->cqn, -1, context);
1118 	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
1119 
1120 	/* Cancel FCS removal if FW allows */
1121 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
1122 		context->param3 |= cpu_to_be32(1 << 29);
1123 		if (priv->dev->features & NETIF_F_RXFCS)
1124 			ring->fcs_del = 0;
1125 		else
1126 			ring->fcs_del = ETH_FCS_LEN;
1127 	} else
1128 		ring->fcs_del = 0;
1129 
1130 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
1131 	if (err) {
1132 		mlx4_qp_remove(mdev->dev, qp);
1133 		mlx4_qp_free(mdev->dev, qp);
1134 	}
1135 	mlx4_en_update_rx_prod_db(ring);
1136 out:
1137 	kfree(context);
1138 	return err;
1139 }
1140 
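/* Reserve and allocate the drop QP: a QP with no RX ring behind it, so
 * packets steered to it by the hardware are simply dropped.
 */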
1141 int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
1142 {
1143 	int err;
1144 	u32 qpn;
1145 
1146 	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
1147 				    MLX4_RESERVE_A0_QP,
1148 				    MLX4_RES_USAGE_DRIVER);
1149 	if (err) {
1150 		en_err(priv, "Failed reserving drop qpn\n");
1151 		return err;
1152 	}
1153 	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
1154 	if (err) {
1155 		en_err(priv, "Failed allocating drop qp\n");
1156 		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
1157 		return err;
1158 	}
1159 
1160 	return 0;
1161 }
1162 
1163 void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
1164 {
1165 	u32 qpn;
1166 
1167 	qpn = priv->drop_qp.qpn;
1168 	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
1169 	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
1170 	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
1171 }
1172 
/* Allocate RX QPs and configure them according to the RSS map */
1174 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1175 {
1176 	struct mlx4_en_dev *mdev = priv->mdev;
1177 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
1178 	struct mlx4_qp_context context;
1179 	struct mlx4_rss_context *rss_context;
1180 	int rss_rings;
1181 	void *ptr;
1182 	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
1183 			MLX4_RSS_TCP_IPV6);
1184 	int i, qpn;
1185 	int err = 0;
1186 	int good_qps = 0;
1187 	u8 flags;
1188 
1189 	en_dbg(DRV, priv, "Configuring rss steering\n");
1190 
1191 	flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
1192 	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
1193 				    priv->rx_ring_num,
1194 				    &rss_map->base_qpn, flags,
1195 				    MLX4_RES_USAGE_DRIVER);
1196 	if (err) {
1197 		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
1198 		return err;
1199 	}
1200 
1201 	for (i = 0; i < priv->rx_ring_num; i++) {
1202 		qpn = rss_map->base_qpn + i;
1203 		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
1204 					    &rss_map->state[i],
1205 					    &rss_map->qps[i]);
1206 		if (err)
1207 			goto rss_err;
1208 
1209 		++good_qps;
1210 	}
1211 
1212 	if (priv->rx_ring_num == 1) {
1213 		rss_map->indir_qp = &rss_map->qps[0];
1214 		priv->base_qpn = rss_map->indir_qp->qpn;
1215 		en_info(priv, "Optimized Non-RSS steering\n");
1216 		return 0;
1217 	}
1218 
1219 	rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
1220 	if (!rss_map->indir_qp) {
1221 		err = -ENOMEM;
1222 		goto rss_err;
1223 	}
1224 
1225 	/* Configure RSS indirection qp */
1226 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1227 	if (err) {
1228 		en_err(priv, "Failed to allocate RSS indirection QP\n");
1229 		goto qp_alloc_err;
1230 	}
1231 
1232 	rss_map->indir_qp->event = mlx4_en_sqp_event;
1233 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
1234 				priv->rx_ring[0]->cqn, -1, &context);
1235 
1236 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
1237 		rss_rings = priv->rx_ring_num;
1238 	else
1239 		rss_rings = priv->prof->rss_rings;
1240 
1241 	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
1242 					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
1243 	rss_context = ptr;
1244 	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
1245 					    (rss_map->base_qpn));
1246 	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
1247 	if (priv->mdev->profile.udp_rss) {
1248 		rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
1249 		rss_context->base_qpn_udp = rss_context->default_qpn;
1250 	}
1251 
1252 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1253 		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
1254 		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
1255 	}
1256 
1257 	rss_context->flags = rss_mask;
1258 	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1259 	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
1260 		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
1261 	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
1262 		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1263 		memcpy(rss_context->rss_key, priv->rss_key,
1264 		       MLX4_EN_RSS_KEY_SIZE);
1265 	} else {
1266 		en_err(priv, "Unknown RSS hash function requested\n");
1267 		err = -EINVAL;
1268 		goto indir_err;
1269 	}
1270 
1271 	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
1272 			       rss_map->indir_qp, &rss_map->indir_state);
1273 	if (err)
1274 		goto indir_err;
1275 
1276 	return 0;
1277 
1278 indir_err:
1279 	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1280 		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
1281 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1282 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1283 qp_alloc_err:
1284 	kfree(rss_map->indir_qp);
1285 	rss_map->indir_qp = NULL;
1286 rss_err:
1287 	for (i = 0; i < good_qps; i++) {
1288 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1289 			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1290 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1291 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1292 	}
1293 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
1294 	return err;
1295 }
1296 
1297 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
1298 {
1299 	struct mlx4_en_dev *mdev = priv->mdev;
1300 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
1301 	int i;
1302 
1303 	if (priv->rx_ring_num > 1) {
1304 		mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1305 			       MLX4_QP_STATE_RST, NULL, 0, 0,
1306 			       rss_map->indir_qp);
1307 		mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1308 		mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1309 		kfree(rss_map->indir_qp);
1310 		rss_map->indir_qp = NULL;
1311 	}
1312 
1313 	for (i = 0; i < priv->rx_ring_num; i++) {
1314 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1315 			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1316 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1317 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1318 	}
1319 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
1320 }
1321