xref: /openbmc/linux/drivers/net/ethernet/amazon/ena/ena_netdev.c (revision 0760aad038b5a032c31ea124feed63d88627d2f1)
1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #ifdef CONFIG_RFS_ACCEL
36 #include <linux/cpu_rmap.h>
37 #endif /* CONFIG_RFS_ACCEL */
38 #include <linux/ethtool.h>
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/numa.h>
42 #include <linux/pci.h>
43 #include <linux/utsname.h>
44 #include <linux/version.h>
45 #include <linux/vmalloc.h>
46 #include <net/ip.h>
47 
48 #include "ena_netdev.h"
49 #include <linux/bpf_trace.h>
50 #include "ena_pci_id_tbl.h"
51 
52 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
53 MODULE_DESCRIPTION(DEVICE_NAME);
54 MODULE_LICENSE("GPL");
55 
56 /* Time in jiffies before concluding the transmitter is hung. */
57 #define TX_TIMEOUT  (5 * HZ)
58 
59 #define ENA_NAPI_BUDGET 64
60 
61 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
62 		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
63 static int debug = -1;
64 module_param(debug, int, 0);
65 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66 
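/* Usage sketch (illustrative; the "ena" module name and "eth0" interface are
 * assumptions): the debug value is handed to netif_msg_init(), i.e. it is the
 * number of NETIF_MSG_* bits to enable, and -1 keeps DEFAULT_MSG_ENABLE above.
 *
 *	modprobe ena debug=16          # enable the lowest 16 message types
 *	ethtool -s eth0 msglvl 0x2041  # or set an explicit NETIF_MSG_* bitmask
 */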
67 static struct ena_aenq_handlers aenq_handlers;
68 
69 static struct workqueue_struct *ena_wq;
70 
71 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
72 
73 static int ena_rss_init_default(struct ena_adapter *adapter);
74 static void check_for_admin_com_state(struct ena_adapter *adapter);
75 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
76 static int ena_restore_device(struct ena_adapter *adapter);
77 
78 static void ena_init_io_rings(struct ena_adapter *adapter,
79 			      int first_index, int count);
80 static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
81 				   int count);
82 static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
83 				  int count);
84 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
85 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
86 					   int first_index,
87 					   int count);
88 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
89 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
90 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
91 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
92 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
93 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
94 				      int first_index, int count);
95 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
96 				     int first_index, int count);
97 static int ena_up(struct ena_adapter *adapter);
98 static void ena_down(struct ena_adapter *adapter);
99 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
100 				 struct ena_ring *rx_ring);
101 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
102 				      struct ena_ring *rx_ring);
103 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
104 			      struct ena_tx_buffer *tx_info);
105 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
106 					    int first_index, int count);
107 
108 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
109 {
110 	struct ena_adapter *adapter = netdev_priv(dev);
111 
112 	/* Change the state of the device to trigger a reset.
113 	 * Check that a reset is not already in progress or already triggered.
114 	 */
115 
116 	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
117 		return;
118 
119 	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
120 	u64_stats_update_begin(&adapter->syncp);
121 	adapter->dev_stats.tx_timeout++;
122 	u64_stats_update_end(&adapter->syncp);
123 
124 	netif_err(adapter, tx_err, dev, "Transmit timed out\n");
125 }
126 
127 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
128 {
129 	int i;
130 
131 	for (i = 0; i < adapter->num_io_queues; i++)
132 		adapter->rx_ring[i].mtu = mtu;
133 }
134 
135 static int ena_change_mtu(struct net_device *dev, int new_mtu)
136 {
137 	struct ena_adapter *adapter = netdev_priv(dev);
138 	int ret;
139 
140 	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
141 	if (!ret) {
142 		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
143 		update_rx_ring_mtu(adapter, new_mtu);
144 		dev->mtu = new_mtu;
145 	} else {
146 		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
147 			  new_mtu);
148 	}
149 
150 	return ret;
151 }
152 
153 static int ena_xmit_common(struct net_device *dev,
154 			   struct ena_ring *ring,
155 			   struct ena_tx_buffer *tx_info,
156 			   struct ena_com_tx_ctx *ena_tx_ctx,
157 			   u16 next_to_use,
158 			   u32 bytes)
159 {
160 	struct ena_adapter *adapter = netdev_priv(dev);
161 	int rc, nb_hw_desc;
162 
163 	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
164 						ena_tx_ctx))) {
165 		netif_dbg(adapter, tx_queued, dev,
166 			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
167 			  ring->qid);
168 		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
169 	}
170 
171 	/* prepare the packet's descriptors for the dma engine */
172 	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
173 				&nb_hw_desc);
174 
175 	/* In case there isn't enough space in the queue for the packet,
176 	 * we simply drop it. All other failure reasons of
177 	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
178 	 */
179 	if (unlikely(rc)) {
180 		netif_err(adapter, tx_queued, dev,
181 			  "failed to prepare tx bufs\n");
182 		u64_stats_update_begin(&ring->syncp);
183 		ring->tx_stats.prepare_ctx_err++;
184 		u64_stats_update_end(&ring->syncp);
185 		if (rc != -ENOMEM) {
186 			adapter->reset_reason =
187 				ENA_REGS_RESET_DRIVER_INVALID_STATE;
188 			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
189 		}
190 		return rc;
191 	}
192 
193 	u64_stats_update_begin(&ring->syncp);
194 	ring->tx_stats.cnt++;
195 	ring->tx_stats.bytes += bytes;
196 	u64_stats_update_end(&ring->syncp);
197 
198 	tx_info->tx_descs = nb_hw_desc;
199 	tx_info->last_jiffies = jiffies;
200 	tx_info->print_once = 0;
201 
202 	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
203 						 ring->ring_size);
204 	return 0;
205 }
206 
207 /* This is the XDP napi callback. XDP queues use a napi callback separate
208  * from the one used by the Rx/Tx queues.
209  */
210 static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
211 {
212 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
213 	u32 xdp_work_done, xdp_budget;
214 	struct ena_ring *xdp_ring;
215 	int napi_comp_call = 0;
216 	int ret;
217 
218 	xdp_ring = ena_napi->xdp_ring;
219 	xdp_ring->first_interrupt = ena_napi->first_interrupt;
220 
221 	xdp_budget = budget;
222 
223 	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
224 	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
225 		napi_complete_done(napi, 0);
226 		return 0;
227 	}
228 
229 	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
230 
231 	/* If the device is about to reset or is down, avoid unmasking
232 	 * the interrupt and return 0 so NAPI won't reschedule
233 	 */
234 	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
235 		napi_complete_done(napi, 0);
236 		ret = 0;
237 	} else if (xdp_budget > xdp_work_done) {
238 		napi_comp_call = 1;
239 		if (napi_complete_done(napi, xdp_work_done))
240 			ena_unmask_interrupt(xdp_ring, NULL);
241 		ena_update_ring_numa_node(xdp_ring, NULL);
242 		ret = xdp_work_done;
243 	} else {
244 		ret = xdp_budget;
245 	}
246 
247 	u64_stats_update_begin(&xdp_ring->syncp);
248 	xdp_ring->tx_stats.napi_comp += napi_comp_call;
249 	xdp_ring->tx_stats.tx_poll++;
250 	u64_stats_update_end(&xdp_ring->syncp);
251 
252 	return ret;
253 }
254 
255 static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
256 			       struct ena_tx_buffer *tx_info,
257 			       struct xdp_buff *xdp,
258 			       void **push_hdr,
259 			       u32 *push_len)
260 {
261 	struct ena_adapter *adapter = xdp_ring->adapter;
262 	struct ena_com_buf *ena_buf;
263 	dma_addr_t dma = 0;
264 	u32 size;
265 
266 	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
267 	size = tx_info->xdpf->len;
268 	ena_buf = tx_info->bufs;
269 
270 	/* llq push buffer */
271 	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
272 	*push_hdr = tx_info->xdpf->data;
273 
274 	if (size - *push_len > 0) {
275 		dma = dma_map_single(xdp_ring->dev,
276 				     *push_hdr + *push_len,
277 				     size - *push_len,
278 				     DMA_TO_DEVICE);
279 		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
280 			goto error_report_dma_error;
281 
282 		tx_info->map_linear_data = 1;
283 		tx_info->num_of_bufs = 1;
284 	}
285 
286 	ena_buf->paddr = dma;
287 	ena_buf->len = size;
288 
289 	return 0;
290 
291 error_report_dma_error:
292 	u64_stats_update_begin(&xdp_ring->syncp);
293 	xdp_ring->tx_stats.dma_mapping_err++;
294 	u64_stats_update_end(&xdp_ring->syncp);
295 	netdev_warn(adapter->netdev, "failed to map xdp buff\n");
296 
297 	xdp_return_frame_rx_napi(tx_info->xdpf);
298 	tx_info->xdpf = NULL;
299 	tx_info->num_of_bufs = 0;
300 
301 	return -EINVAL;
302 }
303 
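/* Worked example for the split above (sizes are hypothetical): with
 * tx_max_header_size == 96 and a 1200-byte XDP frame, the first 96 bytes are
 * pushed inline to the LLQ (*push_len == 96) and the 1104 bytes that follow
 * are DMA-mapped starting at *push_hdr + 96. A 64-byte frame fits entirely in
 * the push header, so nothing is DMA-mapped at all.
 */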
304 static int ena_xdp_xmit_buff(struct net_device *dev,
305 			     struct xdp_buff *xdp,
306 			     int qid,
307 			     struct ena_rx_buffer *rx_info)
308 {
309 	struct ena_adapter *adapter = netdev_priv(dev);
310 	struct ena_com_tx_ctx ena_tx_ctx = {};
311 	struct ena_tx_buffer *tx_info;
312 	struct ena_ring *xdp_ring;
313 	u16 next_to_use, req_id;
314 	int rc;
315 	void *push_hdr;
316 	u32 push_len;
317 
318 	xdp_ring = &adapter->tx_ring[qid];
319 	next_to_use = xdp_ring->next_to_use;
320 	req_id = xdp_ring->free_ids[next_to_use];
321 	tx_info = &xdp_ring->tx_buffer_info[req_id];
322 	tx_info->num_of_bufs = 0;
323 	page_ref_inc(rx_info->page);
324 	tx_info->xdp_rx_page = rx_info->page;
325 
326 	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
327 	if (unlikely(rc))
328 		goto error_drop_packet;
329 
330 	ena_tx_ctx.ena_bufs = tx_info->bufs;
331 	ena_tx_ctx.push_header = push_hdr;
332 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
333 	ena_tx_ctx.req_id = req_id;
334 	ena_tx_ctx.header_len = push_len;
335 
336 	rc = ena_xmit_common(dev,
337 			     xdp_ring,
338 			     tx_info,
339 			     &ena_tx_ctx,
340 			     next_to_use,
341 			     xdp->data_end - xdp->data);
342 	if (rc)
343 		goto error_unmap_dma;
344 	/* trigger the dma engine. ena_com_write_sq_doorbell()
345 	 * includes a memory barrier
346 	 */
347 	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
348 	u64_stats_update_begin(&xdp_ring->syncp);
349 	xdp_ring->tx_stats.doorbells++;
350 	u64_stats_update_end(&xdp_ring->syncp);
351 
352 	return NETDEV_TX_OK;
353 
354 error_unmap_dma:
355 	ena_unmap_tx_buff(xdp_ring, tx_info);
356 	tx_info->xdpf = NULL;
357 error_drop_packet:
358 	__free_page(tx_info->xdp_rx_page);
359 	return NETDEV_TX_OK;
360 }
361 
362 static int ena_xdp_execute(struct ena_ring *rx_ring,
363 			   struct xdp_buff *xdp,
364 			   struct ena_rx_buffer *rx_info)
365 {
366 	struct bpf_prog *xdp_prog;
367 	u32 verdict = XDP_PASS;
368 	u64 *xdp_stat;
369 
370 	rcu_read_lock();
371 	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
372 
373 	if (!xdp_prog)
374 		goto out;
375 
376 	verdict = bpf_prog_run_xdp(xdp_prog, xdp);
377 
378 	if (verdict == XDP_TX) {
379 		ena_xdp_xmit_buff(rx_ring->netdev,
380 				  xdp,
381 				  rx_ring->qid + rx_ring->adapter->num_io_queues,
382 				  rx_info);
383 
384 		xdp_stat = &rx_ring->rx_stats.xdp_tx;
385 	} else if (unlikely(verdict == XDP_ABORTED)) {
386 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
387 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
388 	} else if (unlikely(verdict == XDP_DROP)) {
389 		xdp_stat = &rx_ring->rx_stats.xdp_drop;
390 	} else if (unlikely(verdict == XDP_PASS)) {
391 		xdp_stat = &rx_ring->rx_stats.xdp_pass;
392 	} else {
393 		bpf_warn_invalid_xdp_action(verdict);
394 		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
395 	}
396 
397 	u64_stats_update_begin(&rx_ring->syncp);
398 	(*xdp_stat)++;
399 	u64_stats_update_end(&rx_ring->syncp);
400 out:
401 	rcu_read_unlock();
402 
403 	return verdict;
404 }
405 
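/* Illustrative sketch of an XDP program whose verdicts the function above
 * dispatches (libbpf-style, not part of the driver; drops IPv6, passes the
 * rest):
 *
 *	#include <linux/bpf.h>
 *	#include <linux/if_ether.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_endian.h>
 *
 *	SEC("xdp")
 *	int xdp_drop_ipv6(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		struct ethhdr *eth = data;
 *
 *		if ((void *)(eth + 1) > data_end)
 *			return XDP_ABORTED;
 *		if (eth->h_proto == bpf_htons(ETH_P_IPV6))
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */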
406 static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
407 {
408 	adapter->xdp_first_ring = adapter->num_io_queues;
409 	adapter->xdp_num_queues = adapter->num_io_queues;
410 
411 	ena_init_io_rings(adapter,
412 			  adapter->xdp_first_ring,
413 			  adapter->xdp_num_queues);
414 }
415 
416 static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
417 {
418 	int rc = 0;
419 
420 	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
421 					     adapter->xdp_num_queues);
422 	if (rc)
423 		goto setup_err;
424 
425 	rc = ena_create_io_tx_queues_in_range(adapter,
426 					      adapter->xdp_first_ring,
427 					      adapter->xdp_num_queues);
428 	if (rc)
429 		goto create_err;
430 
431 	return 0;
432 
433 create_err:
434 	ena_free_all_io_tx_resources(adapter);
435 setup_err:
436 	return rc;
437 }
438 
439 /* Provides a way for both the kernel and the bpf program to learn
440  * more about the RX-queue a given XDP frame arrived on.
441  */
442 static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
443 {
444 	int rc;
445 
446 	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);
447 
448 	if (rc) {
449 		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
450 			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
451 			  rx_ring->qid, rc);
452 		goto err;
453 	}
454 
455 	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
456 					NULL);
457 
458 	if (rc) {
459 		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
460 			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
461 			  rx_ring->qid, rc);
462 		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
463 	}
464 
465 err:
466 	return rc;
467 }
468 
469 static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
470 {
471 	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
472 	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
473 }
474 
475 static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
476 						 struct bpf_prog *prog,
477 						 int first, int count)
478 {
479 	struct ena_ring *rx_ring;
480 	int i = 0;
481 
482 	for (i = first; i < count; i++) {
483 		rx_ring = &adapter->rx_ring[i];
484 		xchg(&rx_ring->xdp_bpf_prog, prog);
485 		if (prog) {
486 			ena_xdp_register_rxq_info(rx_ring);
487 			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
488 		} else {
489 			ena_xdp_unregister_rxq_info(rx_ring);
490 			rx_ring->rx_headroom = 0;
491 		}
492 	}
493 }
494 
495 static void ena_xdp_exchange_program(struct ena_adapter *adapter,
496 				     struct bpf_prog *prog)
497 {
498 	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
499 
500 	ena_xdp_exchange_program_rx_in_range(adapter,
501 					     prog,
502 					     0,
503 					     adapter->num_io_queues);
504 
505 	if (old_bpf_prog)
506 		bpf_prog_put(old_bpf_prog);
507 }
508 
509 static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
510 {
511 	bool was_up;
512 	int rc;
513 
514 	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
515 
516 	if (was_up)
517 		ena_down(adapter);
518 
519 	adapter->xdp_first_ring = 0;
520 	adapter->xdp_num_queues = 0;
521 	ena_xdp_exchange_program(adapter, NULL);
522 	if (was_up) {
523 		rc = ena_up(adapter);
524 		if (rc)
525 			return rc;
526 	}
527 	return 0;
528 }
529 
530 static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
531 {
532 	struct ena_adapter *adapter = netdev_priv(netdev);
533 	struct bpf_prog *prog = bpf->prog;
534 	struct bpf_prog *old_bpf_prog;
535 	int rc, prev_mtu;
536 	bool is_up;
537 
538 	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
539 	rc = ena_xdp_allowed(adapter);
540 	if (rc == ENA_XDP_ALLOWED) {
541 		old_bpf_prog = adapter->xdp_bpf_prog;
542 		if (prog) {
543 			if (!is_up) {
544 				ena_init_all_xdp_queues(adapter);
545 			} else if (!old_bpf_prog) {
546 				ena_down(adapter);
547 				ena_init_all_xdp_queues(adapter);
548 			}
549 			ena_xdp_exchange_program(adapter, prog);
550 
551 			if (is_up && !old_bpf_prog) {
552 				rc = ena_up(adapter);
553 				if (rc)
554 					return rc;
555 			}
556 		} else if (old_bpf_prog) {
557 			rc = ena_destroy_and_free_all_xdp_queues(adapter);
558 			if (rc)
559 				return rc;
560 		}
561 
562 		prev_mtu = netdev->max_mtu;
563 		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
564 
565 		if (!old_bpf_prog)
566 			netif_info(adapter, drv, adapter->netdev,
567 				   "xdp program set, changing the max_mtu from %d to %d",
568 				   prev_mtu, netdev->max_mtu);
569 
570 	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
571 		netif_err(adapter, drv, adapter->netdev,
572 			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
573 			  netdev->mtu, ENA_XDP_MAX_MTU);
574 		NL_SET_ERR_MSG_MOD(bpf->extack,
575 				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
576 		return -EINVAL;
577 	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
578 		netif_err(adapter, drv, adapter->netdev,
579 			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
580 			  adapter->num_io_queues, adapter->max_num_io_queues);
581 		NL_SET_ERR_MSG_MOD(bpf->extack,
582 				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
583 		return -EINVAL;
584 	}
585 
586 	return 0;
587 }
588 
589 /* This is the main xdp callback; the kernel uses it to set/unset the xdp
590  * program as well as to query the current xdp program id.
591  */
592 static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
593 {
594 	switch (bpf->command) {
595 	case XDP_SETUP_PROG:
596 		return ena_xdp_set(netdev, bpf);
597 	default:
598 		return -EINVAL;
599 	}
600 	return 0;
601 }
602 
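/* Illustrative note: ena_xdp() is reached through the .ndo_bpf hook of the
 * driver's net_device_ops (wired up further down in this file), roughly
 *
 *	static const struct net_device_ops ena_netdev_ops = {
 *		...
 *		.ndo_bpf	= ena_xdp,
 *		...
 *	};
 *
 * so the kernel invokes it whenever userspace attaches or detaches a program,
 * e.g. "ip link set dev eth0 xdp obj prog.o sec xdp" (interface name assumed).
 */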
603 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
604 {
605 #ifdef CONFIG_RFS_ACCEL
606 	u32 i;
607 	int rc;
608 
609 	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
610 	if (!adapter->netdev->rx_cpu_rmap)
611 		return -ENOMEM;
612 	for (i = 0; i < adapter->num_io_queues; i++) {
613 		int irq_idx = ENA_IO_IRQ_IDX(i);
614 
615 		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
616 				      pci_irq_vector(adapter->pdev, irq_idx));
617 		if (rc) {
618 			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
619 			adapter->netdev->rx_cpu_rmap = NULL;
620 			return rc;
621 		}
622 	}
623 #endif /* CONFIG_RFS_ACCEL */
624 	return 0;
625 }
626 
627 static void ena_init_io_rings_common(struct ena_adapter *adapter,
628 				     struct ena_ring *ring, u16 qid)
629 {
630 	ring->qid = qid;
631 	ring->pdev = adapter->pdev;
632 	ring->dev = &adapter->pdev->dev;
633 	ring->netdev = adapter->netdev;
634 	ring->napi = &adapter->ena_napi[qid].napi;
635 	ring->adapter = adapter;
636 	ring->ena_dev = adapter->ena_dev;
637 	ring->per_napi_packets = 0;
638 	ring->cpu = 0;
639 	ring->first_interrupt = false;
640 	ring->no_interrupt_event_cnt = 0;
641 	u64_stats_init(&ring->syncp);
642 }
643 
644 static void ena_init_io_rings(struct ena_adapter *adapter,
645 			      int first_index, int count)
646 {
647 	struct ena_com_dev *ena_dev;
648 	struct ena_ring *txr, *rxr;
649 	int i;
650 
651 	ena_dev = adapter->ena_dev;
652 
653 	for (i = first_index; i < first_index + count; i++) {
654 		txr = &adapter->tx_ring[i];
655 		rxr = &adapter->rx_ring[i];
656 
657 		/* TX common ring state */
658 		ena_init_io_rings_common(adapter, txr, i);
659 
660 		/* TX specific ring state */
661 		txr->ring_size = adapter->requested_tx_ring_size;
662 		txr->tx_max_header_size = ena_dev->tx_max_header_size;
663 		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
664 		txr->sgl_size = adapter->max_tx_sgl_size;
665 		txr->smoothed_interval =
666 			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
667 		txr->disable_meta_caching = adapter->disable_meta_caching;
668 
669 		/* Don't init RX queues for xdp queues */
670 		if (!ENA_IS_XDP_INDEX(adapter, i)) {
671 			/* RX common ring state */
672 			ena_init_io_rings_common(adapter, rxr, i);
673 
674 			/* RX specific ring state */
675 			rxr->ring_size = adapter->requested_rx_ring_size;
676 			rxr->rx_copybreak = adapter->rx_copybreak;
677 			rxr->sgl_size = adapter->max_rx_sgl_size;
678 			rxr->smoothed_interval =
679 				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
680 			rxr->empty_rx_queue = 0;
681 			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
682 		}
683 	}
684 }
685 
686 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
687  * @adapter: network interface device structure
688  * @qid: queue index
689  *
690  * Return 0 on success, negative on failure
691  */
692 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
693 {
694 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
695 	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
696 	int size, i, node;
697 
698 	if (tx_ring->tx_buffer_info) {
699 		netif_err(adapter, ifup,
700 			  adapter->netdev, "tx_buffer_info is not NULL");
701 		return -EEXIST;
702 	}
703 
704 	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
705 	node = cpu_to_node(ena_irq->cpu);
706 
707 	tx_ring->tx_buffer_info = vzalloc_node(size, node);
708 	if (!tx_ring->tx_buffer_info) {
709 		tx_ring->tx_buffer_info = vzalloc(size);
710 		if (!tx_ring->tx_buffer_info)
711 			goto err_tx_buffer_info;
712 	}
713 
714 	size = sizeof(u16) * tx_ring->ring_size;
715 	tx_ring->free_ids = vzalloc_node(size, node);
716 	if (!tx_ring->free_ids) {
717 		tx_ring->free_ids = vzalloc(size);
718 		if (!tx_ring->free_ids)
719 			goto err_tx_free_ids;
720 	}
721 
722 	size = tx_ring->tx_max_header_size;
723 	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
724 	if (!tx_ring->push_buf_intermediate_buf) {
725 		tx_ring->push_buf_intermediate_buf = vzalloc(size);
726 		if (!tx_ring->push_buf_intermediate_buf)
727 			goto err_push_buf_intermediate_buf;
728 	}
729 
730 	/* Req id ring for TX out of order completions */
731 	for (i = 0; i < tx_ring->ring_size; i++)
732 		tx_ring->free_ids[i] = i;
733 
734 	/* Reset tx statistics */
735 	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
736 
737 	tx_ring->next_to_use = 0;
738 	tx_ring->next_to_clean = 0;
739 	tx_ring->cpu = ena_irq->cpu;
740 	return 0;
741 
742 err_push_buf_intermediate_buf:
743 	vfree(tx_ring->free_ids);
744 	tx_ring->free_ids = NULL;
745 err_tx_free_ids:
746 	vfree(tx_ring->tx_buffer_info);
747 	tx_ring->tx_buffer_info = NULL;
748 err_tx_buffer_info:
749 	return -ENOMEM;
750 }
751 
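/* The allocations above follow a NUMA-local-with-fallback pattern: try the
 * node of the queue's IRQ first, then fall back to any node. A minimal
 * stand-alone sketch of the pattern (hypothetical helper, not used here):
 *
 *	static void *vzalloc_node_with_fallback(unsigned long size, int node)
 *	{
 *		void *p = vzalloc_node(size, node);
 *
 *		return p ? p : vzalloc(size);
 *	}
 *
 * Keeping the rings near the IRQ's CPU avoids cross-node memory traffic in
 * the hot path while still working on memory-constrained nodes.
 */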
752 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
753  * @adapter: network interface device structure
754  * @qid: queue index
755  *
756  * Free all transmit software resources
757  */
758 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
759 {
760 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
761 
762 	vfree(tx_ring->tx_buffer_info);
763 	tx_ring->tx_buffer_info = NULL;
764 
765 	vfree(tx_ring->free_ids);
766 	tx_ring->free_ids = NULL;
767 
768 	vfree(tx_ring->push_buf_intermediate_buf);
769 	tx_ring->push_buf_intermediate_buf = NULL;
770 }
771 
772 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
773 					   int first_index,
774 					   int count)
775 {
776 	int i, rc = 0;
777 
778 	for (i = first_index; i < first_index + count; i++) {
779 		rc = ena_setup_tx_resources(adapter, i);
780 		if (rc)
781 			goto err_setup_tx;
782 	}
783 
784 	return 0;
785 
786 err_setup_tx:
787 
788 	netif_err(adapter, ifup, adapter->netdev,
789 		  "Tx queue %d: allocation failed\n", i);
790 
791 	/* rewind the index freeing the rings as we go */
792 	while (first_index < i--)
793 		ena_free_tx_resources(adapter, i);
794 	return rc;
795 }
796 
797 static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
798 						  int first_index, int count)
799 {
800 	int i;
801 
802 	for (i = first_index; i < first_index + count; i++)
803 		ena_free_tx_resources(adapter, i);
804 }
805 
806 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
807  * @adapter: board private structure
808  *
809  * Free all transmit software resources
810  */
811 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
812 {
813 	ena_free_all_io_tx_resources_in_range(adapter,
814 					      0,
815 					      adapter->xdp_num_queues +
816 					      adapter->num_io_queues);
817 }
818 
819 static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
820 {
821 	if (likely(req_id < rx_ring->ring_size))
822 		return 0;
823 
824 	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
825 		  "Invalid rx req_id: %hu\n", req_id);
826 
827 	u64_stats_update_begin(&rx_ring->syncp);
828 	rx_ring->rx_stats.bad_req_id++;
829 	u64_stats_update_end(&rx_ring->syncp);
830 
831 	/* Trigger device reset */
832 	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
833 	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
834 	return -EFAULT;
835 }
836 
837 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
838  * @adapter: network interface device structure
839  * @qid: queue index
840  *
841  * Returns 0 on success, negative on failure
842  */
843 static int ena_setup_rx_resources(struct ena_adapter *adapter,
844 				  u32 qid)
845 {
846 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
847 	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
848 	int size, node, i;
849 
850 	if (rx_ring->rx_buffer_info) {
851 		netif_err(adapter, ifup, adapter->netdev,
852 			  "rx_buffer_info is not NULL");
853 		return -EEXIST;
854 	}
855 
856 	/* alloc an extra element so that in the rx path
857 	 * we can always safely prefetch rx_info + 1, even for the last entry
858 	 */
859 	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
860 	node = cpu_to_node(ena_irq->cpu);
861 
862 	rx_ring->rx_buffer_info = vzalloc_node(size, node);
863 	if (!rx_ring->rx_buffer_info) {
864 		rx_ring->rx_buffer_info = vzalloc(size);
865 		if (!rx_ring->rx_buffer_info)
866 			return -ENOMEM;
867 	}
868 
869 	size = sizeof(u16) * rx_ring->ring_size;
870 	rx_ring->free_ids = vzalloc_node(size, node);
871 	if (!rx_ring->free_ids) {
872 		rx_ring->free_ids = vzalloc(size);
873 		if (!rx_ring->free_ids) {
874 			vfree(rx_ring->rx_buffer_info);
875 			rx_ring->rx_buffer_info = NULL;
876 			return -ENOMEM;
877 		}
878 	}
879 
880 	/* Req id ring for receiving RX pkts out of order */
881 	for (i = 0; i < rx_ring->ring_size; i++)
882 		rx_ring->free_ids[i] = i;
883 
884 	/* Reset rx statistics */
885 	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
886 
887 	rx_ring->next_to_clean = 0;
888 	rx_ring->next_to_use = 0;
889 	rx_ring->cpu = ena_irq->cpu;
890 
891 	return 0;
892 }
893 
894 /* ena_free_rx_resources - Free I/O Rx Resources
895  * @adapter: network interface device structure
896  * @qid: queue index
897  *
898  * Free all receive software resources
899  */
900 static void ena_free_rx_resources(struct ena_adapter *adapter,
901 				  u32 qid)
902 {
903 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
904 
905 	vfree(rx_ring->rx_buffer_info);
906 	rx_ring->rx_buffer_info = NULL;
907 
908 	vfree(rx_ring->free_ids);
909 	rx_ring->free_ids = NULL;
910 }
911 
912 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
913  * @adapter: board private structure
914  *
915  * Return 0 on success, negative on failure
916  */
917 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
918 {
919 	int i, rc = 0;
920 
921 	for (i = 0; i < adapter->num_io_queues; i++) {
922 		rc = ena_setup_rx_resources(adapter, i);
923 		if (rc)
924 			goto err_setup_rx;
925 	}
926 
927 	return 0;
928 
929 err_setup_rx:
930 
931 	netif_err(adapter, ifup, adapter->netdev,
932 		  "Rx queue %d: allocation failed\n", i);
933 
934 	/* rewind the index freeing the rings as we go */
935 	while (i--)
936 		ena_free_rx_resources(adapter, i);
937 	return rc;
938 }
939 
940 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
941  * @adapter: board private structure
942  *
943  * Free all receive software resources
944  */
945 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
946 {
947 	int i;
948 
949 	for (i = 0; i < adapter->num_io_queues; i++)
950 		ena_free_rx_resources(adapter, i);
951 }
952 
953 static int ena_alloc_rx_page(struct ena_ring *rx_ring,
954 				    struct ena_rx_buffer *rx_info, gfp_t gfp)
955 {
956 	struct ena_com_buf *ena_buf;
957 	struct page *page;
958 	dma_addr_t dma;
959 
960 	/* if the previously allocated page is still unused, keep it */
961 	if (unlikely(rx_info->page))
962 		return 0;
963 
964 	page = alloc_page(gfp);
965 	if (unlikely(!page)) {
966 		u64_stats_update_begin(&rx_ring->syncp);
967 		rx_ring->rx_stats.page_alloc_fail++;
968 		u64_stats_update_end(&rx_ring->syncp);
969 		return -ENOMEM;
970 	}
971 
972 	/* To enable NIC-side port-mirroring, AKA SPAN port,
973 	 * we make the buffer readable by the nic as well
974 	 */
975 	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
976 			   DMA_BIDIRECTIONAL);
977 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
978 		u64_stats_update_begin(&rx_ring->syncp);
979 		rx_ring->rx_stats.dma_mapping_err++;
980 		u64_stats_update_end(&rx_ring->syncp);
981 
982 		__free_page(page);
983 		return -EIO;
984 	}
985 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
986 		  "alloc page %p, rx_info %p\n", page, rx_info);
987 
988 	rx_info->page = page;
989 	rx_info->page_offset = 0;
990 	ena_buf = &rx_info->ena_buf;
991 	ena_buf->paddr = dma + rx_ring->rx_headroom;
992 	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
993 
994 	return 0;
995 }
996 
997 static void ena_free_rx_page(struct ena_ring *rx_ring,
998 			     struct ena_rx_buffer *rx_info)
999 {
1000 	struct page *page = rx_info->page;
1001 	struct ena_com_buf *ena_buf = &rx_info->ena_buf;
1002 
1003 	if (unlikely(!page)) {
1004 		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1005 			   "Trying to free unallocated buffer\n");
1006 		return;
1007 	}
1008 
1009 	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
1010 		       ENA_PAGE_SIZE,
1011 		       DMA_BIDIRECTIONAL);
1012 
1013 	__free_page(page);
1014 	rx_info->page = NULL;
1015 }
1016 
1017 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
1018 {
1019 	u16 next_to_use, req_id;
1020 	u32 i;
1021 	int rc;
1022 
1023 	next_to_use = rx_ring->next_to_use;
1024 
1025 	for (i = 0; i < num; i++) {
1026 		struct ena_rx_buffer *rx_info;
1027 
1028 		req_id = rx_ring->free_ids[next_to_use];
1029 
1030 		rx_info = &rx_ring->rx_buffer_info[req_id];
1031 
1032 		rc = ena_alloc_rx_page(rx_ring, rx_info,
1033 				       GFP_ATOMIC | __GFP_COMP);
1034 		if (unlikely(rc < 0)) {
1035 			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1036 				   "failed to alloc buffer for rx queue %d\n",
1037 				   rx_ring->qid);
1038 			break;
1039 		}
1040 		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1041 						&rx_info->ena_buf,
1042 						req_id);
1043 		if (unlikely(rc)) {
1044 			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1045 				   "failed to add buffer for rx queue %d\n",
1046 				   rx_ring->qid);
1047 			break;
1048 		}
1049 		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1050 						   rx_ring->ring_size);
1051 	}
1052 
1053 	if (unlikely(i < num)) {
1054 		u64_stats_update_begin(&rx_ring->syncp);
1055 		rx_ring->rx_stats.refil_partial++;
1056 		u64_stats_update_end(&rx_ring->syncp);
1057 		netdev_warn(rx_ring->netdev,
1058 			    "refilled rx qid %d with only %d buffers (from %d)\n",
1059 			    rx_ring->qid, i, num);
1060 	}
1061 
1062 	/* ena_com_write_sq_doorbell issues a wmb() */
1063 	if (likely(i))
1064 		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1065 
1066 	rx_ring->next_to_use = next_to_use;
1067 
1068 	return i;
1069 }
1070 
1071 static void ena_free_rx_bufs(struct ena_adapter *adapter,
1072 			     u32 qid)
1073 {
1074 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1075 	u32 i;
1076 
1077 	for (i = 0; i < rx_ring->ring_size; i++) {
1078 		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1079 
1080 		if (rx_info->page)
1081 			ena_free_rx_page(rx_ring, rx_info);
1082 	}
1083 }
1084 
1085 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
1086  * @adapter: board private structure
1087  */
1088 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1089 {
1090 	struct ena_ring *rx_ring;
1091 	int i, rc, bufs_num;
1092 
1093 	for (i = 0; i < adapter->num_io_queues; i++) {
1094 		rx_ring = &adapter->rx_ring[i];
1095 		bufs_num = rx_ring->ring_size - 1;
1096 		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1097 
1098 		if (unlikely(rc != bufs_num))
1099 			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1100 				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
1101 				   i, rc, bufs_num);
1102 	}
1103 }
1104 
1105 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
1106 {
1107 	int i;
1108 
1109 	for (i = 0; i < adapter->num_io_queues; i++)
1110 		ena_free_rx_bufs(adapter, i);
1111 }
1112 
1113 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
1114 			      struct ena_tx_buffer *tx_info)
1115 {
1116 	struct ena_com_buf *ena_buf;
1117 	u32 cnt;
1118 	int i;
1119 
1120 	ena_buf = tx_info->bufs;
1121 	cnt = tx_info->num_of_bufs;
1122 
1123 	if (unlikely(!cnt))
1124 		return;
1125 
1126 	if (tx_info->map_linear_data) {
1127 		dma_unmap_single(tx_ring->dev,
1128 				 dma_unmap_addr(ena_buf, paddr),
1129 				 dma_unmap_len(ena_buf, len),
1130 				 DMA_TO_DEVICE);
1131 		ena_buf++;
1132 		cnt--;
1133 	}
1134 
1135 	/* unmap remaining mapped pages */
1136 	for (i = 0; i < cnt; i++) {
1137 		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
1138 			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
1139 		ena_buf++;
1140 	}
1141 }
1142 
1143 /* ena_free_tx_bufs - Free Tx Buffers per Queue
1144  * @tx_ring: TX ring whose buffers are to be freed
1145  */
1146 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
1147 {
1148 	bool print_once = true;
1149 	u32 i;
1150 
1151 	for (i = 0; i < tx_ring->ring_size; i++) {
1152 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1153 
1154 		if (!tx_info->skb)
1155 			continue;
1156 
1157 		if (print_once) {
1158 			netdev_notice(tx_ring->netdev,
1159 				      "free uncompleted tx skb qid %d idx 0x%x\n",
1160 				      tx_ring->qid, i);
1161 			print_once = false;
1162 		} else {
1163 			netdev_dbg(tx_ring->netdev,
1164 				   "free uncompleted tx skb qid %d idx 0x%x\n",
1165 				   tx_ring->qid, i);
1166 		}
1167 
1168 		ena_unmap_tx_buff(tx_ring, tx_info);
1169 
1170 		dev_kfree_skb_any(tx_info->skb);
1171 	}
1172 	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
1173 						  tx_ring->qid));
1174 }
1175 
1176 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
1177 {
1178 	struct ena_ring *tx_ring;
1179 	int i;
1180 
1181 	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1182 		tx_ring = &adapter->tx_ring[i];
1183 		ena_free_tx_bufs(tx_ring);
1184 	}
1185 }
1186 
1187 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1188 {
1189 	u16 ena_qid;
1190 	int i;
1191 
1192 	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1193 		ena_qid = ENA_IO_TXQ_IDX(i);
1194 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1195 	}
1196 }
1197 
1198 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1199 {
1200 	u16 ena_qid;
1201 	int i;
1202 
1203 	for (i = 0; i < adapter->num_io_queues; i++) {
1204 		ena_qid = ENA_IO_RXQ_IDX(i);
1205 		cancel_work_sync(&adapter->ena_napi[i].dim.work);
1206 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1207 	}
1208 }
1209 
1210 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
1211 {
1212 	ena_destroy_all_tx_queues(adapter);
1213 	ena_destroy_all_rx_queues(adapter);
1214 }
1215 
1216 static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
1217 				 struct ena_tx_buffer *tx_info, bool is_xdp)
1218 {
1219 	if (tx_info)
1220 		netif_err(ring->adapter,
1221 			  tx_done,
1222 			  ring->netdev,
1223 			  "tx_info doesn't have a valid %s",
1224 			   is_xdp ? "xdp frame" : "skb");
1225 	else
1226 		netif_err(ring->adapter,
1227 			  tx_done,
1228 			  ring->netdev,
1229 			  "Invalid req_id: %hu\n",
1230 			  req_id);
1231 
1232 	u64_stats_update_begin(&ring->syncp);
1233 	ring->tx_stats.bad_req_id++;
1234 	u64_stats_update_end(&ring->syncp);
1235 
1236 	/* Trigger device reset */
1237 	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
1238 	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
1239 	return -EFAULT;
1240 }
1241 
1242 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
1243 {
1244 	struct ena_tx_buffer *tx_info = NULL;
1245 
1246 	if (likely(req_id < tx_ring->ring_size)) {
1247 		tx_info = &tx_ring->tx_buffer_info[req_id];
1248 		if (likely(tx_info->skb))
1249 			return 0;
1250 	}
1251 
1252 	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
1253 }
1254 
1255 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
1256 {
1257 	struct ena_tx_buffer *tx_info = NULL;
1258 
1259 	if (likely(req_id < xdp_ring->ring_size)) {
1260 		tx_info = &xdp_ring->tx_buffer_info[req_id];
1261 		if (likely(tx_info->xdpf))
1262 			return 0;
1263 	}
1264 
1265 	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
1266 }
1267 
1268 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
1269 {
1270 	struct netdev_queue *txq;
1271 	bool above_thresh;
1272 	u32 tx_bytes = 0;
1273 	u32 total_done = 0;
1274 	u16 next_to_clean;
1275 	u16 req_id;
1276 	int tx_pkts = 0;
1277 	int rc;
1278 
1279 	next_to_clean = tx_ring->next_to_clean;
1280 	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
1281 
1282 	while (tx_pkts < budget) {
1283 		struct ena_tx_buffer *tx_info;
1284 		struct sk_buff *skb;
1285 
1286 		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
1287 						&req_id);
1288 		if (rc)
1289 			break;
1290 
1291 		rc = validate_tx_req_id(tx_ring, req_id);
1292 		if (rc)
1293 			break;
1294 
1295 		tx_info = &tx_ring->tx_buffer_info[req_id];
1296 		skb = tx_info->skb;
1297 
1298 		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
1299 		prefetch(&skb->end);
1300 
1301 		tx_info->skb = NULL;
1302 		tx_info->last_jiffies = 0;
1303 
1304 		ena_unmap_tx_buff(tx_ring, tx_info);
1305 
1306 		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1307 			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
1308 			  skb);
1309 
1310 		tx_bytes += skb->len;
1311 		dev_kfree_skb(skb);
1312 		tx_pkts++;
1313 		total_done += tx_info->tx_descs;
1314 
1315 		tx_ring->free_ids[next_to_clean] = req_id;
1316 		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1317 						     tx_ring->ring_size);
1318 	}
1319 
1320 	tx_ring->next_to_clean = next_to_clean;
1321 	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
1322 	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
1323 
1324 	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
1325 
1326 	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1327 		  "tx_poll: q %d done. total pkts: %d\n",
1328 		  tx_ring->qid, tx_pkts);
1329 
1330 	/* need to make the ring's circular update visible to
1331 	 * ena_start_xmit() before checking for netif_queue_stopped().
1332 	 */
1333 	smp_mb();
1334 
1335 	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1336 						    ENA_TX_WAKEUP_THRESH);
1337 	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
1338 		__netif_tx_lock(txq, smp_processor_id());
1339 		above_thresh =
1340 			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1341 						     ENA_TX_WAKEUP_THRESH);
1342 		if (netif_tx_queue_stopped(txq) && above_thresh &&
1343 		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
1344 			netif_tx_wake_queue(txq);
1345 			u64_stats_update_begin(&tx_ring->syncp);
1346 			tx_ring->tx_stats.queue_wakeup++;
1347 			u64_stats_update_end(&tx_ring->syncp);
1348 		}
1349 		__netif_tx_unlock(txq);
1350 	}
1351 
1352 	return tx_pkts;
1353 }
1354 
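/* Illustrative note: the wake logic above pairs with the stop side in
 * ena_start_xmit(); in simplified (hypothetical) form the xmit path does
 *
 *	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
 *					  tx_ring->sgl_size + 2)) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
 *						 ENA_TX_WAKEUP_THRESH))
 *			netif_tx_wake_queue(txq);
 *	}
 *
 * The memory barriers on both sides ensure neither path misses the other's
 * update, so a stopped queue cannot stay stopped forever.
 */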
1355 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
1356 {
1357 	struct sk_buff *skb;
1358 
1359 	if (frags)
1360 		skb = napi_get_frags(rx_ring->napi);
1361 	else
1362 		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1363 						rx_ring->rx_copybreak);
1364 
1365 	if (unlikely(!skb)) {
1366 		u64_stats_update_begin(&rx_ring->syncp);
1367 		rx_ring->rx_stats.skb_alloc_fail++;
1368 		u64_stats_update_end(&rx_ring->syncp);
1369 		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1370 			  "Failed to allocate skb. frags: %d\n", frags);
1371 		return NULL;
1372 	}
1373 
1374 	return skb;
1375 }
1376 
1377 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
1378 				  struct ena_com_rx_buf_info *ena_bufs,
1379 				  u32 descs,
1380 				  u16 *next_to_clean)
1381 {
1382 	struct sk_buff *skb;
1383 	struct ena_rx_buffer *rx_info;
1384 	u16 len, req_id, buf = 0;
1385 	void *va;
1386 	int rc;
1387 
1388 	len = ena_bufs[buf].len;
1389 	req_id = ena_bufs[buf].req_id;
1390 
1391 	rc = validate_rx_req_id(rx_ring, req_id);
1392 	if (unlikely(rc < 0))
1393 		return NULL;
1394 
1395 	rx_info = &rx_ring->rx_buffer_info[req_id];
1396 
1397 	if (unlikely(!rx_info->page)) {
1398 		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
1399 			  "Page is NULL\n");
1400 		return NULL;
1401 	}
1402 
1403 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1404 		  "rx_info %p page %p\n",
1405 		  rx_info, rx_info->page);
1406 
1407 	/* save virt address of first buffer */
1408 	va = page_address(rx_info->page) + rx_info->page_offset;
1409 	prefetch(va + NET_IP_ALIGN);
1410 
1411 	if (len <= rx_ring->rx_copybreak) {
1412 		skb = ena_alloc_skb(rx_ring, false);
1413 		if (unlikely(!skb))
1414 			return NULL;
1415 
1416 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1417 			  "rx allocated small packet. len %d. data_len %d\n",
1418 			  skb->len, skb->data_len);
1419 
1420 		/* sync this buffer for CPU use */
1421 		dma_sync_single_for_cpu(rx_ring->dev,
1422 					dma_unmap_addr(&rx_info->ena_buf, paddr),
1423 					len,
1424 					DMA_FROM_DEVICE);
1425 		skb_copy_to_linear_data(skb, va, len);
1426 		dma_sync_single_for_device(rx_ring->dev,
1427 					   dma_unmap_addr(&rx_info->ena_buf, paddr),
1428 					   len,
1429 					   DMA_FROM_DEVICE);
1430 
1431 		skb_put(skb, len);
1432 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1433 		rx_ring->free_ids[*next_to_clean] = req_id;
1434 		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
1435 						     rx_ring->ring_size);
1436 		return skb;
1437 	}
1438 
1439 	skb = ena_alloc_skb(rx_ring, true);
1440 	if (unlikely(!skb))
1441 		return NULL;
1442 
1443 	do {
1444 		dma_unmap_page(rx_ring->dev,
1445 			       dma_unmap_addr(&rx_info->ena_buf, paddr),
1446 			       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);
1447 
1448 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1449 				rx_info->page_offset, len, ENA_PAGE_SIZE);
1450 		/* The offset is non zero only for the first buffer */
1451 		rx_info->page_offset = 0;
1452 
1453 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1454 			  "rx skb updated. len %d. data_len %d\n",
1455 			  skb->len, skb->data_len);
1456 
1457 		rx_info->page = NULL;
1458 
1459 		rx_ring->free_ids[*next_to_clean] = req_id;
1460 		*next_to_clean =
1461 			ENA_RX_RING_IDX_NEXT(*next_to_clean,
1462 					     rx_ring->ring_size);
1463 		if (likely(--descs == 0))
1464 			break;
1465 
1466 		buf++;
1467 		len = ena_bufs[buf].len;
1468 		req_id = ena_bufs[buf].req_id;
1469 
1470 		rc = validate_rx_req_id(rx_ring, req_id);
1471 		if (unlikely(rc < 0))
1472 			return NULL;
1473 
1474 		rx_info = &rx_ring->rx_buffer_info[req_id];
1475 	} while (1);
1476 
1477 	return skb;
1478 }
1479 
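/* Illustrative note on rx_copybreak: packets no longer than rx_copybreak are
 * copied into a fresh skb so the DMA page stays in the ring, while larger
 * packets hand the page over as an skb frag. The threshold can typically be
 * tuned from userspace through the driver's ethtool tunable support, e.g.
 *
 *	ethtool --set-tunable eth0 rx-copybreak 256
 *
 * (interface name and value are examples only).
 */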
1480 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1481  * @rx_ring: the ring from which the packet was received
1482  * @ena_rx_ctx: received packet context/metadata
1483  * @skb: skb currently being received and modified
1484  */
1485 static void ena_rx_checksum(struct ena_ring *rx_ring,
1486 				   struct ena_com_rx_ctx *ena_rx_ctx,
1487 				   struct sk_buff *skb)
1488 {
1489 	/* Rx csum disabled */
1490 	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
1491 		skb->ip_summed = CHECKSUM_NONE;
1492 		return;
1493 	}
1494 
1495 	/* For fragmented packets the checksum isn't valid */
1496 	if (ena_rx_ctx->frag) {
1497 		skb->ip_summed = CHECKSUM_NONE;
1498 		return;
1499 	}
1500 
1501 	/* if IPv4 and checksum error */
1502 	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1503 		     (ena_rx_ctx->l3_csum_err))) {
1504 		/* ipv4 checksum error */
1505 		skb->ip_summed = CHECKSUM_NONE;
1506 		u64_stats_update_begin(&rx_ring->syncp);
1507 		rx_ring->rx_stats.bad_csum++;
1508 		u64_stats_update_end(&rx_ring->syncp);
1509 		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1510 			  "RX IPv4 header checksum error\n");
1511 		return;
1512 	}
1513 
1514 	/* if TCP/UDP */
1515 	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1516 		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
1517 		if (unlikely(ena_rx_ctx->l4_csum_err)) {
1518 			/* TCP/UDP checksum error */
1519 			u64_stats_update_begin(&rx_ring->syncp);
1520 			rx_ring->rx_stats.bad_csum++;
1521 			u64_stats_update_end(&rx_ring->syncp);
1522 			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1523 				  "RX L4 checksum error\n");
1524 			skb->ip_summed = CHECKSUM_NONE;
1525 			return;
1526 		}
1527 
1528 		if (likely(ena_rx_ctx->l4_csum_checked)) {
1529 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1530 			u64_stats_update_begin(&rx_ring->syncp);
1531 			rx_ring->rx_stats.csum_good++;
1532 			u64_stats_update_end(&rx_ring->syncp);
1533 		} else {
1534 			u64_stats_update_begin(&rx_ring->syncp);
1535 			rx_ring->rx_stats.csum_unchecked++;
1536 			u64_stats_update_end(&rx_ring->syncp);
1537 			skb->ip_summed = CHECKSUM_NONE;
1538 		}
1539 	} else {
1540 		skb->ip_summed = CHECKSUM_NONE;
1541 		return;
1542 	}
1543 
1544 }
1545 
1546 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1547 			    struct ena_com_rx_ctx *ena_rx_ctx,
1548 			    struct sk_buff *skb)
1549 {
1550 	enum pkt_hash_types hash_type;
1551 
1552 	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1553 		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1554 			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1555 
1556 			hash_type = PKT_HASH_TYPE_L4;
1557 		else
1558 			hash_type = PKT_HASH_TYPE_NONE;
1559 
1560 		/* Override hash type if the packet is fragmented */
1561 		if (ena_rx_ctx->frag)
1562 			hash_type = PKT_HASH_TYPE_NONE;
1563 
1564 		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1565 	}
1566 }
1567 
1568 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
1569 {
1570 	struct ena_rx_buffer *rx_info;
1571 	int ret;
1572 
1573 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1574 	xdp->data = page_address(rx_info->page) +
1575 		rx_info->page_offset + rx_ring->rx_headroom;
1576 	xdp_set_data_meta_invalid(xdp);
1577 	xdp->data_hard_start = page_address(rx_info->page);
1578 	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
1579 	/* If for some reason we received a bigger packet than
1580 	 * we expect, then we simply drop it
1581 	 */
1582 	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
1583 		return XDP_DROP;
1584 
1585 	ret = ena_xdp_execute(rx_ring, xdp, rx_info);
1586 
1587 	/* The xdp program might expand the headers */
1588 	if (ret == XDP_PASS) {
1589 		rx_info->page_offset = xdp->data - xdp->data_hard_start;
1590 		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
1591 	}
1592 
1593 	return ret;
1594 }

1595 /* ena_clean_rx_irq - Cleanup RX irq
1596  * @rx_ring: RX ring to clean
1597  * @napi: napi handler
1598  * @budget: how many packets driver is allowed to clean
1599  *
1600  * Returns the number of cleaned buffers.
1601  */
1602 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1603 			    u32 budget)
1604 {
1605 	u16 next_to_clean = rx_ring->next_to_clean;
1606 	struct ena_com_rx_ctx ena_rx_ctx;
1607 	struct ena_rx_buffer *rx_info;
1608 	struct ena_adapter *adapter;
1609 	u32 res_budget, work_done;
1610 	int rx_copybreak_pkt = 0;
1611 	int refill_threshold;
1612 	struct sk_buff *skb;
1613 	int refill_required;
1614 	struct xdp_buff xdp;
1615 	int total_len = 0;
1616 	int xdp_verdict;
1617 	int rc = 0;
1618 	int i;
1619 
1620 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1621 		  "%s qid %d\n", __func__, rx_ring->qid);
1622 	res_budget = budget;
1623 	xdp.rxq = &rx_ring->xdp_rxq;
1624 	xdp.frame_sz = ENA_PAGE_SIZE;
1625 
1626 	do {
1627 		xdp_verdict = XDP_PASS;
1628 		skb = NULL;
1629 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1630 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1631 		ena_rx_ctx.descs = 0;
1632 		ena_rx_ctx.pkt_offset = 0;
1633 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1634 				    rx_ring->ena_com_io_sq,
1635 				    &ena_rx_ctx);
1636 		if (unlikely(rc))
1637 			goto error;
1638 
1639 		if (unlikely(ena_rx_ctx.descs == 0))
1640 			break;
1641 
1642 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1643 		rx_info->page_offset = ena_rx_ctx.pkt_offset;
1644 
1645 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1646 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1647 			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1648 			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1649 
1650 		if (ena_xdp_present_ring(rx_ring))
1651 			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
1652 
1653 		/* allocate skb and fill it */
1654 		if (xdp_verdict == XDP_PASS)
1655 			skb = ena_rx_skb(rx_ring,
1656 					 rx_ring->ena_bufs,
1657 					 ena_rx_ctx.descs,
1658 					 &next_to_clean);
1659 
1660 		if (unlikely(!skb)) {
1661 			if (xdp_verdict == XDP_TX)
1662 				ena_free_rx_page(rx_ring,
1663 						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
1664 			for (i = 0; i < ena_rx_ctx.descs; i++) {
1665 				rx_ring->free_ids[next_to_clean] =
1666 					rx_ring->ena_bufs[i].req_id;
1667 				next_to_clean =
1668 					ENA_RX_RING_IDX_NEXT(next_to_clean,
1669 							     rx_ring->ring_size);
1670 			}
1671 			if (xdp_verdict != XDP_PASS) {
1672 				res_budget--;
1673 				continue;
1674 			}
1675 			break;
1676 		}
1677 
1678 		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1679 
1680 		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1681 
1682 		skb_record_rx_queue(skb, rx_ring->qid);
1683 
1684 		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
1685 			total_len += rx_ring->ena_bufs[0].len;
1686 			rx_copybreak_pkt++;
1687 			napi_gro_receive(napi, skb);
1688 		} else {
1689 			total_len += skb->len;
1690 			napi_gro_frags(napi);
1691 		}
1692 
1693 		res_budget--;
1694 	} while (likely(res_budget));
1695 
1696 	work_done = budget - res_budget;
1697 	rx_ring->per_napi_packets += work_done;
1698 	u64_stats_update_begin(&rx_ring->syncp);
1699 	rx_ring->rx_stats.bytes += total_len;
1700 	rx_ring->rx_stats.cnt += work_done;
1701 	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1702 	u64_stats_update_end(&rx_ring->syncp);
1703 
1704 	rx_ring->next_to_clean = next_to_clean;
1705 
1706 	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
1707 	refill_threshold =
1708 		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1709 		      ENA_RX_REFILL_THRESH_PACKET);
1710 
1711 	/* Optimization, try to batch new rx buffers */
1712 	if (refill_required > refill_threshold) {
1713 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1714 		ena_refill_rx_bufs(rx_ring, refill_required);
1715 	}
1716 
1717 	return work_done;
1718 
1719 error:
1720 	adapter = netdev_priv(rx_ring->netdev);
1721 
1722 	u64_stats_update_begin(&rx_ring->syncp);
1723 	rx_ring->rx_stats.bad_desc_num++;
1724 	u64_stats_update_end(&rx_ring->syncp);
1725 
1726 	/* Too many descriptors from the device. Trigger a reset */
1727 	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
1728 	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
1729 
1730 	return 0;
1731 }
1732 
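/* Worked example for the refill batching above, assuming the ena_netdev.h
 * defaults of ENA_RX_REFILL_THRESH_DIVIDER == 8 and
 * ENA_RX_REFILL_THRESH_PACKET == 256: with a 1024-entry ring,
 * refill_threshold = min(1024 / 8, 256) = 128, so buffers are replenished in
 * batches of more than 128 free descriptors instead of one doorbell per
 * packet.
 */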
1733 static void ena_dim_work(struct work_struct *w)
1734 {
1735 	struct dim *dim = container_of(w, struct dim, work);
1736 	struct dim_cq_moder cur_moder =
1737 		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1738 	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
1739 
1740 	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
1741 	dim->state = DIM_START_MEASURE;
1742 }
1743 
1744 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1745 {
1746 	struct dim_sample dim_sample;
1747 	struct ena_ring *rx_ring = ena_napi->rx_ring;
1748 
1749 	if (!rx_ring->per_napi_packets)
1750 		return;
1751 
1752 	rx_ring->non_empty_napi_events++;
1753 
1754 	dim_update_sample(rx_ring->non_empty_napi_events,
1755 			  rx_ring->rx_stats.cnt,
1756 			  rx_ring->rx_stats.bytes,
1757 			  &dim_sample);
1758 
1759 	net_dim(&ena_napi->dim, dim_sample);
1760 
1761 	rx_ring->per_napi_packets = 0;
1762 }
1763 
1764 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
1765 					struct ena_ring *rx_ring)
1766 {
1767 	struct ena_eth_io_intr_reg intr_reg;
1768 	u32 rx_interval = 0;
1769 	/* Rx ring can be NULL for XDP tx queues, which don't have an
1770 	 * accompanying rx_ring pair.
1771 	 */
1772 	if (rx_ring)
1773 		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
1774 			rx_ring->smoothed_interval :
1775 			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
1776 
1777 	/* Update intr register: rx intr delay,
1778 	 * tx intr delay and interrupt unmask
1779 	 */
1780 	ena_com_update_intr_reg(&intr_reg,
1781 				rx_interval,
1782 				tx_ring->smoothed_interval,
1783 				true);
1784 
1785 	u64_stats_update_begin(&tx_ring->syncp);
1786 	tx_ring->tx_stats.unmask_interrupt++;
1787 	u64_stats_update_end(&tx_ring->syncp);
1788 	/* It is a shared MSI-X.
1789 	 * The Tx and Rx CQs both hold a pointer to it,
1790 	 * so we use one of them to reach the intr reg.
1791 	 * The Tx ring is used because rx_ring is NULL for XDP queues.
1792 	 */
1793 	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
1794 }
1795 
1796 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1797 					     struct ena_ring *rx_ring)
1798 {
1799 	int cpu = get_cpu();
1800 	int numa_node;
1801 
1802 	/* Check only one ring since the 2 rings are running on the same cpu */
1803 	if (likely(tx_ring->cpu == cpu))
1804 		goto out;
1805 
1806 	numa_node = cpu_to_node(cpu);
1807 	put_cpu();
1808 
1809 	if (numa_node != NUMA_NO_NODE) {
1810 		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1811 		if (rx_ring)
1812 			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
1813 						 numa_node);
1814 	}
1815 
1816 	tx_ring->cpu = cpu;
1817 	if (rx_ring)
1818 		rx_ring->cpu = cpu;
1819 
1820 	return;
1821 out:
1822 	put_cpu();
1823 }
1824 
1825 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
1826 {
1827 	u32 total_done = 0;
1828 	u16 next_to_clean;
1829 	u32 tx_bytes = 0;
1830 	int tx_pkts = 0;
1831 	u16 req_id;
1832 	int rc;
1833 
1834 	if (unlikely(!xdp_ring))
1835 		return 0;
1836 	next_to_clean = xdp_ring->next_to_clean;
1837 
1838 	while (tx_pkts < budget) {
1839 		struct ena_tx_buffer *tx_info;
1840 		struct xdp_frame *xdpf;
1841 
1842 		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
1843 						&req_id);
1844 		if (rc)
1845 			break;
1846 
1847 		rc = validate_xdp_req_id(xdp_ring, req_id);
1848 		if (rc)
1849 			break;
1850 
1851 		tx_info = &xdp_ring->tx_buffer_info[req_id];
1852 		xdpf = tx_info->xdpf;
1853 
1854 		tx_info->xdpf = NULL;
1855 		tx_info->last_jiffies = 0;
1856 		ena_unmap_tx_buff(xdp_ring, tx_info);
1857 
1858 		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1859 			  "tx_poll: q %d xdpf %p completed\n", xdp_ring->qid,
1860 			  xdpf);
1861 
1862 		tx_bytes += xdpf->len;
1863 		tx_pkts++;
1864 		total_done += tx_info->tx_descs;
1865 
1866 		__free_page(tx_info->xdp_rx_page);
1867 		xdp_ring->free_ids[next_to_clean] = req_id;
1868 		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1869 						     xdp_ring->ring_size);
1870 	}
1871 
1872 	xdp_ring->next_to_clean = next_to_clean;
1873 	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
1874 	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
1875 
1876 	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1877 		  "tx_poll: q %d done. total pkts: %d\n",
1878 		  xdp_ring->qid, tx_pkts);
1879 
1880 	return tx_pkts;
1881 }
1882 
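/* ena_io_poll - NAPI poll handler for a Tx/Rx queue pair
 * @napi: napi instance embedded in struct ena_napi
 * @budget: maximum number of Rx packets to process (0 under netpoll)
 *
 * Summary (added for clarity): Tx completions are cleaned up to
 * ring_size / ENA_TX_POLL_BUDGET_DIVIDER and Rx is cleaned up to @budget.
 * When both stay under their budgets, napi_complete_done() is called and,
 * if the interrupt was masked by ena_intr_msix_io() (the interrupts_masked
 * handshake), adaptive Rx moderation is applied and the interrupt is
 * unmasked. Otherwise the full @budget is returned so NAPI keeps polling.
 */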
1883 static int ena_io_poll(struct napi_struct *napi, int budget)
1884 {
1885 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1886 	struct ena_ring *tx_ring, *rx_ring;
1887 	int tx_work_done;
1888 	int rx_work_done = 0;
1889 	int tx_budget;
1890 	int napi_comp_call = 0;
1891 	int ret;
1892 
1893 	tx_ring = ena_napi->tx_ring;
1894 	rx_ring = ena_napi->rx_ring;
1895 
1896 	tx_ring->first_interrupt = ena_napi->first_interrupt;
1897 	rx_ring->first_interrupt = ena_napi->first_interrupt;
1898 
1899 	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1900 
1901 	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1902 	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1903 		napi_complete_done(napi, 0);
1904 		return 0;
1905 	}
1906 
1907 	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1908 	/* On netpoll the budget is zero and the handler should only clean the
1909 	 * tx completions.
1910 	 */
1911 	if (likely(budget))
1912 		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1913 
1914 	/* If the device is about to reset or is down, avoid unmasking
1915 	 * the interrupt and return 0 so NAPI won't reschedule
1916 	 */
1917 	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1918 		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1919 		napi_complete_done(napi, 0);
1920 		ret = 0;
1921 
1922 	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1923 		napi_comp_call = 1;
1924 
1925 		/* Update the NUMA node and unmask the interrupt only when scheduled
1926 		 * from interrupt context (as opposed to from sk_busy_loop)
1927 		 */
1928 		if (napi_complete_done(napi, rx_work_done) &&
1929 		    READ_ONCE(ena_napi->interrupts_masked)) {
1930 			smp_rmb(); /* make sure interrupts_masked is read */
1931 			WRITE_ONCE(ena_napi->interrupts_masked, false);
1932 			/* We apply adaptive moderation on Rx path only.
1933 			 * Tx uses static interrupt moderation.
1934 			 */
1935 			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1936 				ena_adjust_adaptive_rx_intr_moderation(ena_napi);
1937 
1938 			ena_unmask_interrupt(tx_ring, rx_ring);
1939 		}
1940 
1941 		ena_update_ring_numa_node(tx_ring, rx_ring);
1942 
1943 		ret = rx_work_done;
1944 	} else {
1945 		ret = budget;
1946 	}
1947 
1948 	u64_stats_update_begin(&tx_ring->syncp);
1949 	tx_ring->tx_stats.napi_comp += napi_comp_call;
1950 	tx_ring->tx_stats.tx_poll++;
1951 	u64_stats_update_end(&tx_ring->syncp);
1952 
1953 	return ret;
1954 }
1955 
1956 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1957 {
1958 	struct ena_adapter *adapter = (struct ena_adapter *)data;
1959 
1960 	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1961 
1962 	/* Don't call the aenq handler before probe is done */
1963 	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1964 		ena_com_aenq_intr_handler(adapter->ena_dev, data);
1965 
1966 	return IRQ_HANDLED;
1967 }
1968 
1969 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1970  * @irq: interrupt number
1971  * @data: pointer to a network interface private napi device structure
1972  */
1973 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1974 {
1975 	struct ena_napi *ena_napi = data;
1976 
1977 	ena_napi->first_interrupt = true;
1978 
1979 	WRITE_ONCE(ena_napi->interrupts_masked, true);
1980 	smp_wmb(); /* write interrupts_masked before calling napi */
1981 
1982 	napi_schedule_irqoff(&ena_napi->napi);
1983 
1984 	return IRQ_HANDLED;
1985 }
1986 
1987 /* Reserve a single MSI-X vector for management (admin + AENQ),
1988  * plus one vector for each potential IO queue.
1989  * The number of potential IO queues is the minimum of what the device
1990  * supports and the number of vCPUs.
1991  */
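/* Illustration (assumed values, added for clarity): on an instance with
 * 8 online vCPUs and a device that supports 32 IO queues,
 * max_num_io_queues would typically be min(32, 8) = 8, so
 * ENA_MAX_MSIX_VEC() requests 8 + 1 = 9 vectors: one management vector
 * plus one per IO queue.
 */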
1992 static int ena_enable_msix(struct ena_adapter *adapter)
1993 {
1994 	int msix_vecs, irq_cnt;
1995 
1996 	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1997 		netif_err(adapter, probe, adapter->netdev,
1998 			  "Error, MSI-X is already enabled\n");
1999 		return -EPERM;
2000 	}
2001 
2002 	/* Reserve the maximum number of MSI-X vectors we might need */
2003 	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
2004 	netif_dbg(adapter, probe, adapter->netdev,
2005 		  "trying to enable MSI-X, vectors %d\n", msix_vecs);
2006 
2007 	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
2008 					msix_vecs, PCI_IRQ_MSIX);
2009 
2010 	if (irq_cnt < 0) {
2011 		netif_err(adapter, probe, adapter->netdev,
2012 			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
2013 		return -ENOSPC;
2014 	}
2015 
2016 	if (irq_cnt != msix_vecs) {
2017 		netif_notice(adapter, probe, adapter->netdev,
2018 			     "Enabled only %d MSI-X (out of %d), reducing the number of queues\n",
2019 			     irq_cnt, msix_vecs);
2020 		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
2021 	}
2022 
2023 	if (ena_init_rx_cpu_rmap(adapter))
2024 		netif_warn(adapter, probe, adapter->netdev,
2025 			   "Failed to map IRQs to CPUs\n");
2026 
2027 	adapter->msix_vecs = irq_cnt;
2028 	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
2029 
2030 	return 0;
2031 }
2032 
2033 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
2034 {
2035 	u32 cpu;
2036 
2037 	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
2038 		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
2039 		 pci_name(adapter->pdev));
2040 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
2041 		ena_intr_msix_mgmnt;
2042 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
2043 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
2044 		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
2045 	cpu = cpumask_first(cpu_online_mask);
2046 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
2047 	cpumask_set_cpu(cpu,
2048 			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
2049 }
2050 
2051 static void ena_setup_io_intr(struct ena_adapter *adapter)
2052 {
2053 	struct net_device *netdev;
2054 	int irq_idx, i, cpu;
2055 	int io_queue_count;
2056 
2057 	netdev = adapter->netdev;
2058 	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2059 
2060 	for (i = 0; i < io_queue_count; i++) {
2061 		irq_idx = ENA_IO_IRQ_IDX(i);
2062 		cpu = i % num_online_cpus();
2063 
2064 		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
2065 			 "%s-Tx-Rx-%d", netdev->name, i);
2066 		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
2067 		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
2068 		adapter->irq_tbl[irq_idx].vector =
2069 			pci_irq_vector(adapter->pdev, irq_idx);
2070 		adapter->irq_tbl[irq_idx].cpu = cpu;
2071 
2072 		cpumask_set_cpu(cpu,
2073 				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
2074 	}
2075 }
2076 
2077 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
2078 {
2079 	unsigned long flags = 0;
2080 	struct ena_irq *irq;
2081 	int rc;
2082 
2083 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2084 	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2085 			 irq->data);
2086 	if (rc) {
2087 		netif_err(adapter, probe, adapter->netdev,
2088 			  "failed to request admin irq\n");
2089 		return rc;
2090 	}
2091 
2092 	netif_dbg(adapter, probe, adapter->netdev,
2093 		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
2094 		  irq->affinity_hint_mask.bits[0], irq->vector);
2095 
2096 	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2097 
2098 	return rc;
2099 }
2100 
2101 static int ena_request_io_irq(struct ena_adapter *adapter)
2102 {
2103 	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2104 	unsigned long flags = 0;
2105 	struct ena_irq *irq;
2106 	int rc = 0, i, k;
2107 
2108 	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2109 		netif_err(adapter, ifup, adapter->netdev,
2110 			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
2111 		return -EINVAL;
2112 	}
2113 
2114 	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2115 		irq = &adapter->irq_tbl[i];
2116 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2117 				 irq->data);
2118 		if (rc) {
2119 			netif_err(adapter, ifup, adapter->netdev,
2120 				  "Failed to request I/O IRQ. index %d rc %d\n",
2121 				   i, rc);
2122 			goto err;
2123 		}
2124 
2125 		netif_dbg(adapter, ifup, adapter->netdev,
2126 			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2127 			  i, irq->affinity_hint_mask.bits[0], irq->vector);
2128 
2129 		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2130 	}
2131 
2132 	return rc;
2133 
2134 err:
2135 	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
2136 		irq = &adapter->irq_tbl[k];
2137 		free_irq(irq->vector, irq->data);
2138 	}
2139 
2140 	return rc;
2141 }
2142 
2143 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
2144 {
2145 	struct ena_irq *irq;
2146 
2147 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2148 	synchronize_irq(irq->vector);
2149 	irq_set_affinity_hint(irq->vector, NULL);
2150 	free_irq(irq->vector, irq->data);
2151 }
2152 
2153 static void ena_free_io_irq(struct ena_adapter *adapter)
2154 {
2155 	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2156 	struct ena_irq *irq;
2157 	int i;
2158 
2159 #ifdef CONFIG_RFS_ACCEL
2160 	if (adapter->msix_vecs >= 1) {
2161 		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2162 		adapter->netdev->rx_cpu_rmap = NULL;
2163 	}
2164 #endif /* CONFIG_RFS_ACCEL */
2165 
2166 	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2167 		irq = &adapter->irq_tbl[i];
2168 		irq_set_affinity_hint(irq->vector, NULL);
2169 		free_irq(irq->vector, irq->data);
2170 	}
2171 }
2172 
2173 static void ena_disable_msix(struct ena_adapter *adapter)
2174 {
2175 	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
2176 		pci_free_irq_vectors(adapter->pdev);
2177 }
2178 
2179 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
2180 {
2181 	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2182 	int i;
2183 
2184 	if (!netif_running(adapter->netdev))
2185 		return;
2186 
2187 	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
2188 		synchronize_irq(adapter->irq_tbl[i].vector);
2189 }
2190 
2191 static void ena_del_napi_in_range(struct ena_adapter *adapter,
2192 				  int first_index,
2193 				  int count)
2194 {
2195 	int i;
2196 
2197 	for (i = first_index; i < first_index + count; i++) {
2198 		netif_napi_del(&adapter->ena_napi[i].napi);
2199 
2200 		WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
2201 			adapter->ena_napi[i].xdp_ring);
2202 	}
2203 }
2204 
2205 static void ena_init_napi_in_range(struct ena_adapter *adapter,
2206 				   int first_index, int count)
2207 {
2208 	int i;
2209 
2210 	for (i = first_index; i < first_index + count; i++) {
2211 		struct ena_napi *napi = &adapter->ena_napi[i];
2212 
2213 		netif_napi_add(adapter->netdev,
2214 			       &napi->napi,
2215 			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
2216 			       ENA_NAPI_BUDGET);
2217 
2218 		if (!ENA_IS_XDP_INDEX(adapter, i)) {
2219 			napi->rx_ring = &adapter->rx_ring[i];
2220 			napi->tx_ring = &adapter->tx_ring[i];
2221 		} else {
2222 			napi->xdp_ring = &adapter->tx_ring[i];
2223 		}
2224 		napi->qid = i;
2225 	}
2226 }
2227 
2228 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
2229 				      int first_index,
2230 				      int count)
2231 {
2232 	int i;
2233 
2234 	for (i = first_index; i < first_index + count; i++)
2235 		napi_disable(&adapter->ena_napi[i].napi);
2236 }
2237 
2238 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
2239 				     int first_index,
2240 				     int count)
2241 {
2242 	int i;
2243 
2244 	for (i = first_index; i < first_index + count; i++)
2245 		napi_enable(&adapter->ena_napi[i].napi);
2246 }
2247 
2248 /* Configure RSS (Rx flow hashing and the indirection table) */
2249 static int ena_rss_configure(struct ena_adapter *adapter)
2250 {
2251 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2252 	int rc;
2253 
2254 	/* In case the RSS table wasn't initialized by probe */
2255 	if (!ena_dev->rss.tbl_log_size) {
2256 		rc = ena_rss_init_default(adapter);
2257 		if (rc && (rc != -EOPNOTSUPP)) {
2258 			netif_err(adapter, ifup, adapter->netdev,
2259 				  "Failed to init RSS rc: %d\n", rc);
2260 			return rc;
2261 		}
2262 	}
2263 
2264 	/* Set indirect table */
2265 	rc = ena_com_indirect_table_set(ena_dev);
2266 	if (unlikely(rc && rc != -EOPNOTSUPP))
2267 		return rc;
2268 
2269 	/* Configure hash function (if supported) */
2270 	rc = ena_com_set_hash_function(ena_dev);
2271 	if (unlikely(rc && (rc != -EOPNOTSUPP)))
2272 		return rc;
2273 
2274 	/* Configure hash inputs (if supported) */
2275 	rc = ena_com_set_hash_ctrl(ena_dev);
2276 	if (unlikely(rc && (rc != -EOPNOTSUPP)))
2277 		return rc;
2278 
2279 	return 0;
2280 }
2281 
2282 static int ena_up_complete(struct ena_adapter *adapter)
2283 {
2284 	int rc;
2285 
2286 	rc = ena_rss_configure(adapter);
2287 	if (rc)
2288 		return rc;
2289 
2290 	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2291 
2292 	ena_refill_all_rx_bufs(adapter);
2293 
2294 	/* enable transmits */
2295 	netif_tx_start_all_queues(adapter->netdev);
2296 
2297 	ena_napi_enable_in_range(adapter,
2298 				 0,
2299 				 adapter->xdp_num_queues + adapter->num_io_queues);
2300 
2301 	return 0;
2302 }
2303 
2304 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2305 {
2306 	struct ena_com_create_io_ctx ctx;
2307 	struct ena_com_dev *ena_dev;
2308 	struct ena_ring *tx_ring;
2309 	u32 msix_vector;
2310 	u16 ena_qid;
2311 	int rc;
2312 
2313 	ena_dev = adapter->ena_dev;
2314 
2315 	tx_ring = &adapter->tx_ring[qid];
2316 	msix_vector = ENA_IO_IRQ_IDX(qid);
2317 	ena_qid = ENA_IO_TXQ_IDX(qid);
2318 
2319 	memset(&ctx, 0x0, sizeof(ctx));
2320 
2321 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2322 	ctx.qid = ena_qid;
2323 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2324 	ctx.msix_vector = msix_vector;
2325 	ctx.queue_size = tx_ring->ring_size;
2326 	ctx.numa_node = cpu_to_node(tx_ring->cpu);
2327 
2328 	rc = ena_com_create_io_queue(ena_dev, &ctx);
2329 	if (rc) {
2330 		netif_err(adapter, ifup, adapter->netdev,
2331 			  "Failed to create I/O TX queue num %d rc: %d\n",
2332 			  qid, rc);
2333 		return rc;
2334 	}
2335 
2336 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2337 				     &tx_ring->ena_com_io_sq,
2338 				     &tx_ring->ena_com_io_cq);
2339 	if (rc) {
2340 		netif_err(adapter, ifup, adapter->netdev,
2341 			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2342 			  qid, rc);
2343 		ena_com_destroy_io_queue(ena_dev, ena_qid);
2344 		return rc;
2345 	}
2346 
2347 	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2348 	return rc;
2349 }
2350 
2351 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2352 					    int first_index, int count)
2353 {
2354 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2355 	int rc, i;
2356 
2357 	for (i = first_index; i < first_index + count; i++) {
2358 		rc = ena_create_io_tx_queue(adapter, i);
2359 		if (rc)
2360 			goto create_err;
2361 	}
2362 
2363 	return 0;
2364 
2365 create_err:
2366 	while (i-- > first_index)
2367 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2368 
2369 	return rc;
2370 }
2371 
2372 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2373 {
2374 	struct ena_com_dev *ena_dev;
2375 	struct ena_com_create_io_ctx ctx;
2376 	struct ena_ring *rx_ring;
2377 	u32 msix_vector;
2378 	u16 ena_qid;
2379 	int rc;
2380 
2381 	ena_dev = adapter->ena_dev;
2382 
2383 	rx_ring = &adapter->rx_ring[qid];
2384 	msix_vector = ENA_IO_IRQ_IDX(qid);
2385 	ena_qid = ENA_IO_RXQ_IDX(qid);
2386 
2387 	memset(&ctx, 0x0, sizeof(ctx));
2388 
2389 	ctx.qid = ena_qid;
2390 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2391 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2392 	ctx.msix_vector = msix_vector;
2393 	ctx.queue_size = rx_ring->ring_size;
2394 	ctx.numa_node = cpu_to_node(rx_ring->cpu);
2395 
2396 	rc = ena_com_create_io_queue(ena_dev, &ctx);
2397 	if (rc) {
2398 		netif_err(adapter, ifup, adapter->netdev,
2399 			  "Failed to create I/O RX queue num %d rc: %d\n",
2400 			  qid, rc);
2401 		return rc;
2402 	}
2403 
2404 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2405 				     &rx_ring->ena_com_io_sq,
2406 				     &rx_ring->ena_com_io_cq);
2407 	if (rc) {
2408 		netif_err(adapter, ifup, adapter->netdev,
2409 			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2410 			  qid, rc);
2411 		goto err;
2412 	}
2413 
2414 	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2415 
2416 	return rc;
2417 err:
2418 	ena_com_destroy_io_queue(ena_dev, ena_qid);
2419 	return rc;
2420 }
2421 
2422 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2423 {
2424 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2425 	int rc, i;
2426 
2427 	for (i = 0; i < adapter->num_io_queues; i++) {
2428 		rc = ena_create_io_rx_queue(adapter, i);
2429 		if (rc)
2430 			goto create_err;
2431 		INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2432 	}
2433 
2434 	return 0;
2435 
2436 create_err:
2437 	while (i--) {
2438 		cancel_work_sync(&adapter->ena_napi[i].dim.work);
2439 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2440 	}
2441 
2442 	return rc;
2443 }
2444 
2445 static void set_io_rings_size(struct ena_adapter *adapter,
2446 			      int new_tx_size,
2447 			      int new_rx_size)
2448 {
2449 	int i;
2450 
2451 	for (i = 0; i < adapter->num_io_queues; i++) {
2452 		adapter->tx_ring[i].ring_size = new_tx_size;
2453 		adapter->rx_ring[i].ring_size = new_rx_size;
2454 	}
2455 }
2456 
2457 /* This function allows queue allocation to back off when the system is
2458  * low on memory. If there is not enough memory to allocate IO queues,
2459  * the driver will try to allocate smaller queues.
2460  *
2461  * The backoff algorithm is as follows:
2462  *  1. Try to allocate the TX and RX queues; if successful,
2463  *  1.1. return success.
2464  *
2465  *  2. Halve the size of the larger of the RX and TX queues (or of both if they are the same size).
2466  *
2467  *  3. If either TX or RX would drop below ENA_MIN_RING_SIZE (256),
2468  *  3.1. return failure.
2469  *  4. else,
2470  *  4.1. go back to 1.
2471  */
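/* Worked example (illustrative only): with requested sizes TX=1024, RX=256
 * and repeated -ENOMEM failures, the retry sequence is
 * (1024, 256) -> (512, 256) -> (256, 256); the next halving would drop both
 * below ENA_MIN_RING_SIZE (256), so the function then gives up and returns
 * the error.
 */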
2472 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2473 {
2474 	int rc, cur_rx_ring_size, cur_tx_ring_size;
2475 	int new_rx_ring_size, new_tx_ring_size;
2476 
2477 	/* The current queue sizes might be smaller than the requested
2478 	 * ones due to past queue allocation failures.
2479 	 */
2480 	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2481 			  adapter->requested_rx_ring_size);
2482 
2483 	while (1) {
2484 		if (ena_xdp_present(adapter)) {
2485 			rc = ena_setup_and_create_all_xdp_queues(adapter);
2486 
2487 			if (rc)
2488 				goto err_setup_tx;
2489 		}
2490 		rc = ena_setup_tx_resources_in_range(adapter,
2491 						     0,
2492 						     adapter->num_io_queues);
2493 		if (rc)
2494 			goto err_setup_tx;
2495 
2496 		rc = ena_create_io_tx_queues_in_range(adapter,
2497 						      0,
2498 						      adapter->num_io_queues);
2499 		if (rc)
2500 			goto err_create_tx_queues;
2501 
2502 		rc = ena_setup_all_rx_resources(adapter);
2503 		if (rc)
2504 			goto err_setup_rx;
2505 
2506 		rc = ena_create_all_io_rx_queues(adapter);
2507 		if (rc)
2508 			goto err_create_rx_queues;
2509 
2510 		return 0;
2511 
2512 err_create_rx_queues:
2513 		ena_free_all_io_rx_resources(adapter);
2514 err_setup_rx:
2515 		ena_destroy_all_tx_queues(adapter);
2516 err_create_tx_queues:
2517 		ena_free_all_io_tx_resources(adapter);
2518 err_setup_tx:
2519 		if (rc != -ENOMEM) {
2520 			netif_err(adapter, ifup, adapter->netdev,
2521 				  "Queue creation failed with error code %d\n",
2522 				  rc);
2523 			return rc;
2524 		}
2525 
2526 		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2527 		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2528 
2529 		netif_err(adapter, ifup, adapter->netdev,
2530 			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2531 			  cur_tx_ring_size, cur_rx_ring_size);
2532 
2533 		new_tx_ring_size = cur_tx_ring_size;
2534 		new_rx_ring_size = cur_rx_ring_size;
2535 
2536 		/* Decrease the size of the larger queue, or
2537 		 * decrease both if they are the same size.
2538 		 */
2539 		if (cur_rx_ring_size <= cur_tx_ring_size)
2540 			new_tx_ring_size = cur_tx_ring_size / 2;
2541 		if (cur_rx_ring_size >= cur_tx_ring_size)
2542 			new_rx_ring_size = cur_rx_ring_size / 2;
2543 
2544 		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2545 		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
2546 			netif_err(adapter, ifup, adapter->netdev,
2547 				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2548 				  ENA_MIN_RING_SIZE);
2549 			return rc;
2550 		}
2551 
2552 		netif_err(adapter, ifup, adapter->netdev,
2553 			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
2554 			  new_tx_ring_size,
2555 			  new_rx_ring_size);
2556 
2557 		set_io_rings_size(adapter, new_tx_ring_size,
2558 				  new_rx_ring_size);
2559 	}
2560 }
2561 
2562 static int ena_up(struct ena_adapter *adapter)
2563 {
2564 	int io_queue_count, rc, i;
2565 
2566 	netdev_dbg(adapter->netdev, "%s\n", __func__);
2567 
2568 	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2569 	ena_setup_io_intr(adapter);
2570 
2571 	/* The napi poll functions must be initialized before calling
2572 	 * request_irq() in order to handle a rare condition where a
2573 	 * pending interrupt causes the ISR to fire immediately while the
2574 	 * poll function has not been set yet, leading to a NULL dereference.
2575 	 */
2576 	ena_init_napi_in_range(adapter, 0, io_queue_count);
2577 
2578 	rc = ena_request_io_irq(adapter);
2579 	if (rc)
2580 		goto err_req_irq;
2581 
2582 	rc = create_queues_with_size_backoff(adapter);
2583 	if (rc)
2584 		goto err_create_queues_with_backoff;
2585 
2586 	rc = ena_up_complete(adapter);
2587 	if (rc)
2588 		goto err_up;
2589 
2590 	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2591 		netif_carrier_on(adapter->netdev);
2592 
2593 	u64_stats_update_begin(&adapter->syncp);
2594 	adapter->dev_stats.interface_up++;
2595 	u64_stats_update_end(&adapter->syncp);
2596 
2597 	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2598 
2599 	/* Enable the completion queue interrupts */
2600 	for (i = 0; i < adapter->num_io_queues; i++)
2601 		ena_unmask_interrupt(&adapter->tx_ring[i],
2602 				     &adapter->rx_ring[i]);
2603 
2604 	/* Schedule napi in case there are pending packets
2605 	 * left over from the last time napi was disabled
2606 	 */
2607 	for (i = 0; i < io_queue_count; i++)
2608 		napi_schedule(&adapter->ena_napi[i].napi);
2609 
2610 	return rc;
2611 
2612 err_up:
2613 	ena_destroy_all_tx_queues(adapter);
2614 	ena_free_all_io_tx_resources(adapter);
2615 	ena_destroy_all_rx_queues(adapter);
2616 	ena_free_all_io_rx_resources(adapter);
2617 err_create_queues_with_backoff:
2618 	ena_free_io_irq(adapter);
2619 err_req_irq:
2620 	ena_del_napi_in_range(adapter, 0, io_queue_count);
2621 
2622 	return rc;
2623 }
2624 
2625 static void ena_down(struct ena_adapter *adapter)
2626 {
2627 	int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2628 
2629 	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2630 
2631 	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2632 
2633 	u64_stats_update_begin(&adapter->syncp);
2634 	adapter->dev_stats.interface_down++;
2635 	u64_stats_update_end(&adapter->syncp);
2636 
2637 	netif_carrier_off(adapter->netdev);
2638 	netif_tx_disable(adapter->netdev);
2639 
2640 	/* After this point the napi handler won't enable the tx queue */
2641 	ena_napi_disable_in_range(adapter, 0, io_queue_count);
2642 
2643 	/* After destroying the queues there won't be any new interrupts */
2644 
2645 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2646 		int rc;
2647 
2648 		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2649 		if (rc)
2650 			dev_err(&adapter->pdev->dev, "Device reset failed\n");
2651 		/* stop submitting admin commands on a device that was reset */
2652 		ena_com_set_admin_running_state(adapter->ena_dev, false);
2653 	}
2654 
2655 	ena_destroy_all_io_queues(adapter);
2656 
2657 	ena_disable_io_intr_sync(adapter);
2658 	ena_free_io_irq(adapter);
2659 	ena_del_napi_in_range(adapter, 0, io_queue_count);
2660 
2661 	ena_free_all_tx_bufs(adapter);
2662 	ena_free_all_rx_bufs(adapter);
2663 	ena_free_all_io_tx_resources(adapter);
2664 	ena_free_all_io_rx_resources(adapter);
2665 }
2666 
2667 /* ena_open - Called when a network interface is made active
2668  * @netdev: network interface device structure
2669  *
2670  * Returns 0 on success, negative value on failure
2671  *
2672  * The open entry point is called when a network interface is made
2673  * active by the system (IFF_UP).  At this point all resources needed
2674  * for transmit and receive operations are allocated, the interrupt
2675  * handler is registered with the OS, the watchdog timer is started,
2676  * and the stack is notified that the interface is ready.
2677  */
2678 static int ena_open(struct net_device *netdev)
2679 {
2680 	struct ena_adapter *adapter = netdev_priv(netdev);
2681 	int rc;
2682 
2683 	/* Notify the stack of the actual queue counts. */
2684 	rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2685 	if (rc) {
2686 		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2687 		return rc;
2688 	}
2689 
2690 	rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2691 	if (rc) {
2692 		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2693 		return rc;
2694 	}
2695 
2696 	rc = ena_up(adapter);
2697 	if (rc)
2698 		return rc;
2699 
2700 	return rc;
2701 }
2702 
2703 /* ena_close - Disables a network interface
2704  * @netdev: network interface device structure
2705  *
2706  * Returns 0, this is not allowed to fail
2707  *
2708  * The close entry point is called when an interface is de-activated
2709  * by the OS.  The hardware is still under the drivers control, but
2710  * needs to be disabled.  A global MAC reset is issued to stop the
2711  * hardware, and all transmit and receive resources are freed.
2712  */
2713 static int ena_close(struct net_device *netdev)
2714 {
2715 	struct ena_adapter *adapter = netdev_priv(netdev);
2716 
2717 	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2718 
2719 	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2720 		return 0;
2721 
2722 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2723 		ena_down(adapter);
2724 
2725 	/* Check the device status and issue a reset if needed */
2726 	check_for_admin_com_state(adapter);
2727 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2728 		netif_err(adapter, ifdown, adapter->netdev,
2729 			  "Destroy failure, restarting device\n");
2730 		ena_dump_stats_to_dmesg(adapter);
2731 		/* rtnl lock already obtained in dev_ioctl() layer */
2732 		ena_destroy_device(adapter, false);
2733 		ena_restore_device(adapter);
2734 	}
2735 
2736 	return 0;
2737 }
2738 
2739 int ena_update_queue_sizes(struct ena_adapter *adapter,
2740 			   u32 new_tx_size,
2741 			   u32 new_rx_size)
2742 {
2743 	bool dev_was_up;
2744 
2745 	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2746 	ena_close(adapter->netdev);
2747 	adapter->requested_tx_ring_size = new_tx_size;
2748 	adapter->requested_rx_ring_size = new_rx_size;
2749 	ena_init_io_rings(adapter,
2750 			  0,
2751 			  adapter->xdp_num_queues +
2752 			  adapter->num_io_queues);
2753 	return dev_was_up ? ena_up(adapter) : 0;
2754 }
2755 
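/* Note (added for clarity): the XDP Tx rings live in the same tx_ring[]
 * array directly after the regular IO queues, so when the channel count
 * changes, xdp_first_ring/xdp_num_queues are moved accordingly and the XDP
 * program is attached to, or detached from, the Rx queues that were added
 * or removed.
 */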
2756 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2757 {
2758 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2759 	int prev_channel_count;
2760 	bool dev_was_up;
2761 
2762 	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2763 	ena_close(adapter->netdev);
2764 	prev_channel_count = adapter->num_io_queues;
2765 	adapter->num_io_queues = new_channel_count;
2766 	if (ena_xdp_present(adapter) &&
2767 	    ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2768 		adapter->xdp_first_ring = new_channel_count;
2769 		adapter->xdp_num_queues = new_channel_count;
2770 		if (prev_channel_count > new_channel_count)
2771 			ena_xdp_exchange_program_rx_in_range(adapter,
2772 							     NULL,
2773 							     new_channel_count,
2774 							     prev_channel_count);
2775 		else
2776 			ena_xdp_exchange_program_rx_in_range(adapter,
2777 							     adapter->xdp_bpf_prog,
2778 							     prev_channel_count,
2779 							     new_channel_count);
2780 	}
2781 
2782 	/* We need to destroy the rss table so that the indirection
2783 	 * table will be reinitialized by ena_up()
2784 	 */
2785 	ena_com_rss_destroy(ena_dev);
2786 	ena_init_io_rings(adapter,
2787 			  0,
2788 			  adapter->xdp_num_queues +
2789 			  adapter->num_io_queues);
2790 	return dev_was_up ? ena_open(adapter->netdev) : 0;
2791 }
2792 
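/* Descriptive summary (added for clarity): ena_tx_csum() translates the
 * skb's offload requests into Tx descriptor metadata: it enables TSO and/or
 * partial L4 checksum, records the L3/L4 protocols and the header
 * lengths/offsets, and sets meta_valid. When metadata caching is disabled
 * by the device, an all-zero meta block is still sent for every packet.
 */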
2793 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2794 			struct sk_buff *skb,
2795 			bool disable_meta_caching)
2796 {
2797 	u32 mss = skb_shinfo(skb)->gso_size;
2798 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2799 	u8 l4_protocol = 0;
2800 
2801 	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2802 		ena_tx_ctx->l4_csum_enable = 1;
2803 		if (mss) {
2804 			ena_tx_ctx->tso_enable = 1;
2805 			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2806 			ena_tx_ctx->l4_csum_partial = 0;
2807 		} else {
2808 			ena_tx_ctx->tso_enable = 0;
2809 			ena_meta->l4_hdr_len = 0;
2810 			ena_tx_ctx->l4_csum_partial = 1;
2811 		}
2812 
2813 		switch (ip_hdr(skb)->version) {
2814 		case IPVERSION:
2815 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2816 			if (ip_hdr(skb)->frag_off & htons(IP_DF))
2817 				ena_tx_ctx->df = 1;
2818 			if (mss)
2819 				ena_tx_ctx->l3_csum_enable = 1;
2820 			l4_protocol = ip_hdr(skb)->protocol;
2821 			break;
2822 		case 6:
2823 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2824 			l4_protocol = ipv6_hdr(skb)->nexthdr;
2825 			break;
2826 		default:
2827 			break;
2828 		}
2829 
2830 		if (l4_protocol == IPPROTO_TCP)
2831 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2832 		else
2833 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2834 
2835 		ena_meta->mss = mss;
2836 		ena_meta->l3_hdr_len = skb_network_header_len(skb);
2837 		ena_meta->l3_hdr_offset = skb_network_offset(skb);
2838 		ena_tx_ctx->meta_valid = 1;
2839 	} else if (disable_meta_caching) {
2840 		memset(ena_meta, 0, sizeof(*ena_meta));
2841 		ena_tx_ctx->meta_valid = 1;
2842 	} else {
2843 		ena_tx_ctx->meta_valid = 0;
2844 	}
2845 }
2846 
2847 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2848 				       struct sk_buff *skb)
2849 {
2850 	int num_frags, header_len, rc;
2851 
2852 	num_frags = skb_shinfo(skb)->nr_frags;
2853 	header_len = skb_headlen(skb);
2854 
2855 	if (num_frags < tx_ring->sgl_size)
2856 		return 0;
2857 
2858 	if ((num_frags == tx_ring->sgl_size) &&
2859 	    (header_len < tx_ring->tx_max_header_size))
2860 		return 0;
2861 
2862 	u64_stats_update_begin(&tx_ring->syncp);
2863 	tx_ring->tx_stats.linearize++;
2864 	u64_stats_update_end(&tx_ring->syncp);
2865 
2866 	rc = skb_linearize(skb);
2867 	if (unlikely(rc)) {
2868 		u64_stats_update_begin(&tx_ring->syncp);
2869 		tx_ring->tx_stats.linearize_failed++;
2870 		u64_stats_update_end(&tx_ring->syncp);
2871 	}
2872 
2873 	return rc;
2874 }
2875 
2876 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2877 			  struct ena_tx_buffer *tx_info,
2878 			  struct sk_buff *skb,
2879 			  void **push_hdr,
2880 			  u16 *header_len)
2881 {
2882 	struct ena_adapter *adapter = tx_ring->adapter;
2883 	struct ena_com_buf *ena_buf;
2884 	dma_addr_t dma;
2885 	u32 skb_head_len, frag_len, last_frag;
2886 	u16 push_len = 0;
2887 	u16 delta = 0;
2888 	int i = 0;
2889 
2890 	skb_head_len = skb_headlen(skb);
2891 	tx_info->skb = skb;
2892 	ena_buf = tx_info->bufs;
2893 
2894 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2895 		/* When the device is LLQ mode, the driver will copy
2896 		/* When the device is in LLQ mode, the driver copies
2897 		 * the header into the device memory space.
2898 		 * The ena_com layer assumes the header is in a linear
2899 		 * memory space.
2900 		 * This assumption might be wrong since part of the header
2901 		 * can be in the fragmented buffers.
2902 		 * Use skb_header_pointer() to make sure the header is in a
2903 		 * linear memory space.
2904 		 */
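		/* Clarifying note (added): if the copied header (push_len bytes)
		 * extends past the linear part of the skb, `delta` below records
		 * how many bytes of the first fragment(s) were already consumed
		 * by the pushed header, so those bytes are skipped when the
		 * fragments are DMA-mapped further down.
		 */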
2905 		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2906 		*push_hdr = skb_header_pointer(skb, 0, push_len,
2907 					       tx_ring->push_buf_intermediate_buf);
2908 		*header_len = push_len;
2909 		if (unlikely(skb->data != *push_hdr)) {
2910 			u64_stats_update_begin(&tx_ring->syncp);
2911 			tx_ring->tx_stats.llq_buffer_copy++;
2912 			u64_stats_update_end(&tx_ring->syncp);
2913 
2914 			delta = push_len - skb_head_len;
2915 		}
2916 	} else {
2917 		*push_hdr = NULL;
2918 		*header_len = min_t(u32, skb_head_len,
2919 				    tx_ring->tx_max_header_size);
2920 	}
2921 
2922 	netif_dbg(adapter, tx_queued, adapter->netdev,
2923 		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2924 		  *push_hdr, push_len);
2925 
2926 	if (skb_head_len > push_len) {
2927 		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2928 				     skb_head_len - push_len, DMA_TO_DEVICE);
2929 		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2930 			goto error_report_dma_error;
2931 
2932 		ena_buf->paddr = dma;
2933 		ena_buf->len = skb_head_len - push_len;
2934 
2935 		ena_buf++;
2936 		tx_info->num_of_bufs++;
2937 		tx_info->map_linear_data = 1;
2938 	} else {
2939 		tx_info->map_linear_data = 0;
2940 	}
2941 
2942 	last_frag = skb_shinfo(skb)->nr_frags;
2943 
2944 	for (i = 0; i < last_frag; i++) {
2945 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2946 
2947 		frag_len = skb_frag_size(frag);
2948 
2949 		if (unlikely(delta >= frag_len)) {
2950 			delta -= frag_len;
2951 			continue;
2952 		}
2953 
2954 		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2955 				       frag_len - delta, DMA_TO_DEVICE);
2956 		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2957 			goto error_report_dma_error;
2958 
2959 		ena_buf->paddr = dma;
2960 		ena_buf->len = frag_len - delta;
2961 		ena_buf++;
2962 		tx_info->num_of_bufs++;
2963 		delta = 0;
2964 	}
2965 
2966 	return 0;
2967 
2968 error_report_dma_error:
2969 	u64_stats_update_begin(&tx_ring->syncp);
2970 	tx_ring->tx_stats.dma_mapping_err++;
2971 	u64_stats_update_end(&tx_ring->syncp);
2972 	netdev_warn(adapter->netdev, "failed to map skb\n");
2973 
2974 	tx_info->skb = NULL;
2975 
2976 	tx_info->num_of_bufs += i;
2977 	ena_unmap_tx_buff(tx_ring, tx_info);
2978 
2979 	return -EINVAL;
2980 }
2981 
2982 /* Called with netif_tx_lock. */
2983 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2984 {
2985 	struct ena_adapter *adapter = netdev_priv(dev);
2986 	struct ena_tx_buffer *tx_info;
2987 	struct ena_com_tx_ctx ena_tx_ctx;
2988 	struct ena_ring *tx_ring;
2989 	struct netdev_queue *txq;
2990 	void *push_hdr;
2991 	u16 next_to_use, req_id, header_len;
2992 	int qid, rc;
2993 
2994 	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2995 	/* Determine which tx ring we will be placed on */
2996 	qid = skb_get_queue_mapping(skb);
2997 	tx_ring = &adapter->tx_ring[qid];
2998 	txq = netdev_get_tx_queue(dev, qid);
2999 
3000 	rc = ena_check_and_linearize_skb(tx_ring, skb);
3001 	if (unlikely(rc))
3002 		goto error_drop_packet;
3003 
3004 	skb_tx_timestamp(skb);
3005 
3006 	next_to_use = tx_ring->next_to_use;
3007 	req_id = tx_ring->free_ids[next_to_use];
3008 	tx_info = &tx_ring->tx_buffer_info[req_id];
3009 	tx_info->num_of_bufs = 0;
3010 
3011 	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
3012 
3013 	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
3014 	if (unlikely(rc))
3015 		goto error_drop_packet;
3016 
3017 	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
3018 	ena_tx_ctx.ena_bufs = tx_info->bufs;
3019 	ena_tx_ctx.push_header = push_hdr;
3020 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
3021 	ena_tx_ctx.req_id = req_id;
3022 	ena_tx_ctx.header_len = header_len;
3023 
3024 	/* set flags and meta data */
3025 	ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
3026 
3027 	rc = ena_xmit_common(dev,
3028 			     tx_ring,
3029 			     tx_info,
3030 			     &ena_tx_ctx,
3031 			     next_to_use,
3032 			     skb->len);
3033 	if (rc)
3034 		goto error_unmap_dma;
3035 
3036 	netdev_tx_sent_queue(txq, skb->len);
3037 
3038 	/* Stop the queue when no more space is available. A packet can need up
3039 	 * to sgl_size + 2 descriptors: one for the meta descriptor and one for
3040 	 * the header (if the header is larger than tx_max_header_size).
3041 	 */
3042 	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3043 						   tx_ring->sgl_size + 2))) {
3044 		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3045 			  __func__, qid);
3046 
3047 		netif_tx_stop_queue(txq);
3048 		u64_stats_update_begin(&tx_ring->syncp);
3049 		tx_ring->tx_stats.queue_stop++;
3050 		u64_stats_update_end(&tx_ring->syncp);
3051 
3052 		/* There is a rare condition where this function decides to
3053 		 * stop the queue but meanwhile clean_tx_irq updates
3054 		 * next_to_completion and terminates.
3055 		 * The queue would then remain stopped forever.
3056 		 * To solve this issue, add a memory barrier to make sure that
3057 		 * the netif_tx_stop_queue() write is visible before checking
3058 		 * whether there is additional space in the queue.
3059 		 */
3060 		smp_mb();
3061 
3062 		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3063 						 ENA_TX_WAKEUP_THRESH)) {
3064 			netif_tx_wake_queue(txq);
3065 			u64_stats_update_begin(&tx_ring->syncp);
3066 			tx_ring->tx_stats.queue_wakeup++;
3067 			u64_stats_update_end(&tx_ring->syncp);
3068 		}
3069 	}
3070 
3071 	if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
3072 		/* Trigger the DMA engine. ena_com_write_sq_doorbell()
3073 		 * includes a memory barrier.
3074 		 */
3075 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
3076 		u64_stats_update_begin(&tx_ring->syncp);
3077 		tx_ring->tx_stats.doorbells++;
3078 		u64_stats_update_end(&tx_ring->syncp);
3079 	}
3080 
3081 	return NETDEV_TX_OK;
3082 
3083 error_unmap_dma:
3084 	ena_unmap_tx_buff(tx_ring, tx_info);
3085 	tx_info->skb = NULL;
3086 
3087 error_drop_packet:
3088 	dev_kfree_skb(skb);
3089 	return NETDEV_TX_OK;
3090 }
3091 
3092 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
3093 			    struct net_device *sb_dev)
3094 {
3095 	u16 qid;
3096 	/* We suspect that this is good for in-kernel network services that
3097 	 * want to loop an incoming skb from Rx back to Tx. For normal,
3098 	 * user-generated traffic we will most probably not get here.
3099 	 */
3100 	if (skb_rx_queue_recorded(skb))
3101 		qid = skb_get_rx_queue(skb);
3102 	else
3103 		qid = netdev_pick_tx(dev, skb, NULL);
3104 
3105 	return qid;
3106 }
3107 
3108 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3109 {
3110 	struct ena_admin_host_info *host_info;
3111 	int rc;
3112 
3113 	/* Allocate only the host info */
3114 	rc = ena_com_allocate_host_info(ena_dev);
3115 	if (rc) {
3116 		pr_err("Cannot allocate host info\n");
3117 		return;
3118 	}
3119 
3120 	host_info = ena_dev->host_attr.host_info;
3121 
3122 	host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
3123 	host_info->os_type = ENA_ADMIN_OS_LINUX;
3124 	host_info->kernel_ver = LINUX_VERSION_CODE;
3125 	strlcpy(host_info->kernel_ver_str, utsname()->version,
3126 		sizeof(host_info->kernel_ver_str) - 1);
3127 	host_info->os_dist = 0;
3128 	strncpy(host_info->os_dist_str, utsname()->release,
3129 		sizeof(host_info->os_dist_str) - 1);
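	/* Note (added for clarity): the driver version is packed into a single
	 * 32-bit field: the major number occupies the lowest bits, with the
	 * minor, sub-minor and module-type fields shifted above it using the
	 * ENA_ADMIN_HOST_INFO_*_SHIFT definitions.
	 */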
3130 	host_info->driver_version =
3131 		(DRV_MODULE_GEN_MAJOR) |
3132 		(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3133 		(DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3134 		("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3135 	host_info->num_cpus = num_online_cpus();
3136 
3137 	host_info->driver_supported_features =
3138 		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
3139 		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
3140 		ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
3141 		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
3142 
3143 	rc = ena_com_set_host_attributes(ena_dev);
3144 	if (rc) {
3145 		if (rc == -EOPNOTSUPP)
3146 			pr_warn("Cannot set host attributes\n");
3147 		else
3148 			pr_err("Cannot set host attributes\n");
3149 
3150 		goto err;
3151 	}
3152 
3153 	return;
3154 
3155 err:
3156 	ena_com_delete_host_info(ena_dev);
3157 }
3158 
3159 static void ena_config_debug_area(struct ena_adapter *adapter)
3160 {
3161 	u32 debug_area_size;
3162 	int rc, ss_count;
3163 
3164 	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3165 	if (ss_count <= 0) {
3166 		netif_err(adapter, drv, adapter->netdev,
3167 			  "SS count is not positive\n");
3168 		return;
3169 	}
3170 
3171 	/* Allocate 32 bytes for each string and 64 bits for each value */
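	/* For example (illustrative): with ss_count = 100 statistics strings,
	 * debug_area_size = 100 * ETH_GSTRING_LEN (32) + 100 * sizeof(u64) (8)
	 * = 4000 bytes.
	 */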
3172 	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3173 
3174 	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3175 	if (rc) {
3176 		pr_err("Cannot allocate debug area\n");
3177 		return;
3178 	}
3179 
3180 	rc = ena_com_set_host_attributes(adapter->ena_dev);
3181 	if (rc) {
3182 		if (rc == -EOPNOTSUPP)
3183 			netif_warn(adapter, drv, adapter->netdev,
3184 				   "Cannot set host attributes\n");
3185 		else
3186 			netif_err(adapter, drv, adapter->netdev,
3187 				  "Cannot set host attributes\n");
3188 		goto err;
3189 	}
3190 
3191 	return;
3192 err:
3193 	ena_com_delete_debug_area(adapter->ena_dev);
3194 }
3195 
3196 int ena_update_hw_stats(struct ena_adapter *adapter)
3197 {
3198 	int rc = 0;
3199 
3200 	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
3201 	if (rc) {
3202 		dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
3203 		return rc;
3204 	}
3205 
3206 	return 0;
3207 }
3208 
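/* Note (added for clarity): per-ring packet/byte counters are read under the
 * u64_stats_fetch_begin_irq()/retry_irq() sequence so that the 64-bit
 * counters are sampled consistently on 32-bit systems, then summed into the
 * rtnl_link_stats64 structure; the drop counters come from the adapter's
 * device-wide stats.
 */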
3209 static void ena_get_stats64(struct net_device *netdev,
3210 			    struct rtnl_link_stats64 *stats)
3211 {
3212 	struct ena_adapter *adapter = netdev_priv(netdev);
3213 	struct ena_ring *rx_ring, *tx_ring;
3214 	unsigned int start;
3215 	u64 rx_drops;
3216 	u64 tx_drops;
3217 	int i;
3218 
3219 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3220 		return;
3221 
3222 	for (i = 0; i < adapter->num_io_queues; i++) {
3223 		u64 bytes, packets;
3224 
3225 		tx_ring = &adapter->tx_ring[i];
3226 
3227 		do {
3228 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3229 			packets = tx_ring->tx_stats.cnt;
3230 			bytes = tx_ring->tx_stats.bytes;
3231 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
3232 
3233 		stats->tx_packets += packets;
3234 		stats->tx_bytes += bytes;
3235 
3236 		rx_ring = &adapter->rx_ring[i];
3237 
3238 		do {
3239 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3240 			packets = rx_ring->rx_stats.cnt;
3241 			bytes = rx_ring->rx_stats.bytes;
3242 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3243 
3244 		stats->rx_packets += packets;
3245 		stats->rx_bytes += bytes;
3246 	}
3247 
3248 	do {
3249 		start = u64_stats_fetch_begin_irq(&adapter->syncp);
3250 		rx_drops = adapter->dev_stats.rx_drops;
3251 		tx_drops = adapter->dev_stats.tx_drops;
3252 	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
3253 
3254 	stats->rx_dropped = rx_drops;
3255 	stats->tx_dropped = tx_drops;
3256 
3257 	stats->multicast = 0;
3258 	stats->collisions = 0;
3259 
3260 	stats->rx_length_errors = 0;
3261 	stats->rx_crc_errors = 0;
3262 	stats->rx_frame_errors = 0;
3263 	stats->rx_fifo_errors = 0;
3264 	stats->rx_missed_errors = 0;
3265 	stats->tx_window_errors = 0;
3266 
3267 	stats->rx_errors = 0;
3268 	stats->tx_errors = 0;
3269 }
3270 
3271 static const struct net_device_ops ena_netdev_ops = {
3272 	.ndo_open		= ena_open,
3273 	.ndo_stop		= ena_close,
3274 	.ndo_start_xmit		= ena_start_xmit,
3275 	.ndo_select_queue	= ena_select_queue,
3276 	.ndo_get_stats64	= ena_get_stats64,
3277 	.ndo_tx_timeout		= ena_tx_timeout,
3278 	.ndo_change_mtu		= ena_change_mtu,
3279 	.ndo_set_mac_address	= NULL,
3280 	.ndo_validate_addr	= eth_validate_addr,
3281 	.ndo_bpf		= ena_xdp,
3282 };
3283 
3284 static int ena_device_validate_params(struct ena_adapter *adapter,
3285 				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
3286 {
3287 	struct net_device *netdev = adapter->netdev;
3288 	int rc;
3289 
3290 	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3291 			      adapter->mac_addr);
3292 	if (!rc) {
3293 		netif_err(adapter, drv, netdev,
3294 			  "Error, MAC addresses are different\n");
3295 		return -EINVAL;
3296 	}
3297 
3298 	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3299 		netif_err(adapter, drv, netdev,
3300 			  "Error, device max mtu is smaller than netdev MTU\n");
3301 		return -EINVAL;
3302 	}
3303 
3304 	return 0;
3305 }
3306 
3307 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3308 {
3309 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3310 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3311 	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3312 	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3313 	llq_config->llq_ring_entry_size_value = 128;
3314 }
3315 
3316 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3317 					   struct ena_com_dev *ena_dev,
3318 					   struct ena_admin_feature_llq_desc *llq,
3319 					   struct ena_llq_configurations *llq_default_configurations)
3320 {
3321 	int rc;
3322 	u32 llq_feature_mask;
3323 
3324 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3325 	if (!(ena_dev->supported_features & llq_feature_mask)) {
3326 		dev_err(&pdev->dev,
3327 			"LLQ is not supported. Fallback to host mode policy.\n");
3328 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3329 		return 0;
3330 	}
3331 
3332 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3333 	if (unlikely(rc)) {
3334 		dev_err(&pdev->dev,
3335 			"Failed to configure the device mode.  Fallback to host mode policy.\n");
3336 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3337 	}
3338 
3339 	return 0;
3340 }
3341 
3342 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3343 			       int bars)
3344 {
3345 	bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3346 
3347 	if (!has_mem_bar) {
3348 		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3349 			dev_err(&pdev->dev,
3350 				"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3351 			ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3352 		}
3353 
3354 		return 0;
3355 	}
3356 
3357 	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3358 					   pci_resource_start(pdev, ENA_MEM_BAR),
3359 					   pci_resource_len(pdev, ENA_MEM_BAR));
3360 
3361 	if (!ena_dev->mem_bar)
3362 		return -EFAULT;
3363 
3364 	return 0;
3365 }
3366 
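/* Descriptive summary (added for clarity): ena_device_init() brings the
 * device to an operational admin state: it sets up readless MMIO register
 * access, resets and version-checks the device, configures the DMA mask from
 * the reported DMA width, initializes the admin queue (in polling mode until
 * MSI-X is set up later), publishes host info, reads the device feature set,
 * enables the supported AENQ groups and selects the LLQ/host Tx placement
 * policy.
 */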
3367 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3368 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
3369 			   bool *wd_state)
3370 {
3371 	struct ena_llq_configurations llq_config;
3372 	struct device *dev = &pdev->dev;
3373 	bool readless_supported;
3374 	u32 aenq_groups;
3375 	int dma_width;
3376 	int rc;
3377 
3378 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
3379 	if (rc) {
3380 		dev_err(dev, "failed to init mmio read less\n");
3381 		return rc;
3382 	}
3383 
3384 	/* The PCIe configuration space revision id indicates whether mmio reg
3385 	 * read is disabled
3386 	 */
3387 	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3388 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3389 
3390 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3391 	if (rc) {
3392 		dev_err(dev, "Can not reset device\n");
3393 		goto err_mmio_read_less;
3394 	}
3395 
3396 	rc = ena_com_validate_version(ena_dev);
3397 	if (rc) {
3398 		dev_err(dev, "device version is too low\n");
3399 		goto err_mmio_read_less;
3400 	}
3401 
3402 	dma_width = ena_com_get_dma_width(ena_dev);
3403 	if (dma_width < 0) {
3404 		dev_err(dev, "Invalid dma width value %d", dma_width);
3405 		rc = dma_width;
3406 		goto err_mmio_read_less;
3407 	}
3408 
3409 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3410 	if (rc) {
3411 		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
3412 		goto err_mmio_read_less;
3413 	}
3414 
3415 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3416 	if (rc) {
3417 		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
3418 			rc);
3419 		goto err_mmio_read_less;
3420 	}
3421 
3422 	/* ENA admin level init */
3423 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3424 	if (rc) {
3425 		dev_err(dev,
3426 			"Can not initialize ena admin queue with device\n");
3427 		goto err_mmio_read_less;
3428 	}
3429 
3430 	/* To enable the MSI-X interrupts the driver needs to know the number
3431 	 * of queues, so the driver uses polling mode to retrieve this
3432 	 * information.
3433 	 */
3434 	ena_com_set_admin_polling_mode(ena_dev, true);
3435 
3436 	ena_config_host_info(ena_dev, pdev);
3437 
3438 	/* Get Device Attributes */
3439 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3440 	if (rc) {
3441 		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3442 		goto err_admin_init;
3443 	}
3444 
3445 	/* Try to turn on all the available AENQ groups */
3446 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3447 		BIT(ENA_ADMIN_FATAL_ERROR) |
3448 		BIT(ENA_ADMIN_WARNING) |
3449 		BIT(ENA_ADMIN_NOTIFICATION) |
3450 		BIT(ENA_ADMIN_KEEP_ALIVE);
3451 
3452 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
3453 
3454 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3455 	if (rc) {
3456 		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3457 		goto err_admin_init;
3458 	}
3459 
3460 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3461 
3462 	set_default_llq_configurations(&llq_config);
3463 
3464 	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3465 					     &llq_config);
3466 	if (rc) {
3467 		dev_err(&pdev->dev, "ena device init failed\n");
3468 		goto err_admin_init;
3469 	}
3470 
3471 	return 0;
3472 
3473 err_admin_init:
3474 	ena_com_delete_host_info(ena_dev);
3475 	ena_com_admin_destroy(ena_dev);
3476 err_mmio_read_less:
3477 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3478 
3479 	return rc;
3480 }
3481 
3482 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3483 {
3484 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3485 	struct device *dev = &adapter->pdev->dev;
3486 	int rc;
3487 
3488 	rc = ena_enable_msix(adapter);
3489 	if (rc) {
3490 		dev_err(dev, "Can not reserve msix vectors\n");
3491 		return rc;
3492 	}
3493 
3494 	ena_setup_mgmnt_intr(adapter);
3495 
3496 	rc = ena_request_mgmnt_irq(adapter);
3497 	if (rc) {
3498 		dev_err(dev, "Can not setup management interrupts\n");
3499 		goto err_disable_msix;
3500 	}
3501 
3502 	ena_com_set_admin_polling_mode(ena_dev, false);
3503 
3504 	ena_com_admin_aenq_enable(ena_dev);
3505 
3506 	return 0;
3507 
3508 err_disable_msix:
3509 	ena_disable_msix(adapter);
3510 
3511 	return rc;
3512 }
3513 
3514 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3515 {
3516 	struct net_device *netdev = adapter->netdev;
3517 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3518 	bool dev_up;
3519 
3520 	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3521 		return;
3522 
3523 	netif_carrier_off(netdev);
3524 
3525 	del_timer_sync(&adapter->timer_service);
3526 
3527 	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3528 	adapter->dev_up_before_reset = dev_up;
3529 	if (!graceful)
3530 		ena_com_set_admin_running_state(ena_dev, false);
3531 
3532 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3533 		ena_down(adapter);
3534 
3535 	/* Stop the device from sending AENQ events (if the reset flag is set
3536 	 * and the device is up, ena_down() has already reset the device).
3537 	 */
3538 	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3539 		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3540 
3541 	ena_free_mgmnt_irq(adapter);
3542 
3543 	ena_disable_msix(adapter);
3544 
3545 	ena_com_abort_admin_commands(ena_dev);
3546 
3547 	ena_com_wait_for_abort_completion(ena_dev);
3548 
3549 	ena_com_admin_destroy(ena_dev);
3550 
3551 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3552 
3553 	/* return reset reason to default value */
3554 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3555 
3556 	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3557 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3558 }
3559 
3560 static int ena_restore_device(struct ena_adapter *adapter)
3561 {
3562 	struct ena_com_dev_get_features_ctx get_feat_ctx;
3563 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3564 	struct pci_dev *pdev = adapter->pdev;
3565 	bool wd_state;
3566 	int rc;
3567 
3568 	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3569 	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3570 	if (rc) {
3571 		dev_err(&pdev->dev, "Can not initialize device\n");
3572 		goto err;
3573 	}
3574 	adapter->wd_state = wd_state;
3575 
3576 	rc = ena_device_validate_params(adapter, &get_feat_ctx);
3577 	if (rc) {
3578 		dev_err(&pdev->dev, "Validation of device parameters failed\n");
3579 		goto err_device_destroy;
3580 	}
3581 
3582 	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3583 	if (rc) {
3584 		dev_err(&pdev->dev, "Enable MSI-X failed\n");
3585 		goto err_device_destroy;
3586 	}
3587 	/* If the interface was up before the reset, bring it up again */
3588 	if (adapter->dev_up_before_reset) {
3589 		rc = ena_up(adapter);
3590 		if (rc) {
3591 			dev_err(&pdev->dev, "Failed to create I/O queues\n");
3592 			goto err_disable_msix;
3593 		}
3594 	}
3595 
3596 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3597 
3598 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3599 	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3600 		netif_carrier_on(adapter->netdev);
3601 
3602 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3603 	dev_err(&pdev->dev, "Device reset completed successfully\n");
3604 	adapter->last_keep_alive_jiffies = jiffies;
3605 
3606 	return rc;
3607 err_disable_msix:
3608 	ena_free_mgmnt_irq(adapter);
3609 	ena_disable_msix(adapter);
3610 err_device_destroy:
3611 	ena_com_abort_admin_commands(ena_dev);
3612 	ena_com_wait_for_abort_completion(ena_dev);
3613 	ena_com_admin_destroy(ena_dev);
3614 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3615 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3616 err:
3617 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3618 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3619 	dev_err(&pdev->dev,
3620 		"Reset attempt failed. Can not reset the device\n");
3621 
3622 	return rc;
3623 }
3624 
3625 static void ena_fw_reset_device(struct work_struct *work)
3626 {
3627 	struct ena_adapter *adapter =
3628 		container_of(work, struct ena_adapter, reset_task);
3629 
3630 	rtnl_lock();
3631 
3632 	if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3633 		ena_destroy_device(adapter, false);
3634 		ena_restore_device(adapter);
3635 	}
3636 
3637 	rtnl_unlock();
3638 }
3639 
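/* Detect a queue whose completion queue has entries but which never received
 * an interrupt. first_interrupt is set by the IO IRQ handler; if it is still
 * clear while the CQ is non-empty for ENA_MAX_NO_INTERRUPT_ITERATIONS
 * consecutive timer ticks, assume an MSI-X delivery problem and request a
 * device reset.
 */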
3640 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3641 					struct ena_ring *rx_ring)
3642 {
3643 	if (likely(rx_ring->first_interrupt))
3644 		return 0;
3645 
3646 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3647 		return 0;
3648 
3649 	rx_ring->no_interrupt_event_cnt++;
3650 
3651 	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3652 		netif_err(adapter, rx_err, adapter->netdev,
3653 			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3654 			  rx_ring->qid);
3655 		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3656 		smp_mb__before_atomic();
3657 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3658 		return -EIO;
3659 	}
3660 
3661 	return 0;
3662 }
3663 
3664 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3665 					  struct ena_ring *tx_ring)
3666 {
3667 	struct ena_tx_buffer *tx_buf;
3668 	unsigned long last_jiffies;
3669 	u32 missed_tx = 0;
3670 	int i, rc = 0;
3671 
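	/* Two thresholds are applied to each pending Tx packet:
	 * - missing_tx_completion_to marks the packet as a missed completion;
	 *   a reset is requested only once the number of such packets exceeds
	 *   missing_tx_completion_threshold.
	 * - 2 * missing_tx_completion_to with no interrupt ever seen on this
	 *   queue is treated as a missed MSI-X interrupt and requests a reset
	 *   immediately.
	 */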
3672 	for (i = 0; i < tx_ring->ring_size; i++) {
3673 		tx_buf = &tx_ring->tx_buffer_info[i];
3674 		last_jiffies = tx_buf->last_jiffies;
3675 
3676 		if (last_jiffies == 0)
3677 			/* no pending Tx at this location */
3678 			continue;
3679 
3680 		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3681 			     2 * adapter->missing_tx_completion_to))) {
3682 			/* If the interrupt is still not received after the
3683 			 * grace period, schedule a reset
3684 			 */
3685 			netif_err(adapter, tx_err, adapter->netdev,
3686 				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3687 				  tx_ring->qid);
3688 			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3689 			smp_mb__before_atomic();
3690 			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3691 			return -EIO;
3692 		}
3693 
3694 		if (unlikely(time_is_before_jiffies(last_jiffies +
3695 				adapter->missing_tx_completion_to))) {
3696 			if (!tx_buf->print_once)
3697 				netif_notice(adapter, tx_err, adapter->netdev,
3698 					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3699 					     tx_ring->qid, i);
3700 
3701 			tx_buf->print_once = 1;
3702 			missed_tx++;
3703 		}
3704 	}
3705 
3706 	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3707 		netif_err(adapter, tx_err, adapter->netdev,
3708 			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3709 			  missed_tx,
3710 			  adapter->missing_tx_completion_threshold);
3711 		adapter->reset_reason =
3712 			ENA_REGS_RESET_MISS_TX_CMPL;
3713 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3714 		rc = -EIO;
3715 	}
3716 
3717 	u64_stats_update_begin(&tx_ring->syncp);
3718 	tx_ring->tx_stats.missed_tx += missed_tx;
3719 	u64_stats_update_end(&tx_ring->syncp);
3720 
3721 	return rc;
3722 }
3723 
3724 static void check_for_missing_completions(struct ena_adapter *adapter)
3725 {
3726 	struct ena_ring *tx_ring;
3727 	struct ena_ring *rx_ring;
3728 	int i, budget, rc;
3729 	int io_queue_count;
3730 
3731 	io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
3732 	/* Make sure the driver isn't turning the device on/off in another process */
3733 	smp_rmb();
3734 
3735 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3736 		return;
3737 
3738 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3739 		return;
3740 
3741 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3742 		return;
3743 
3744 	budget = ENA_MONITORED_TX_QUEUES;
3745 
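	/* Monitor at most ENA_MONITORED_TX_QUEUES queues per timer tick,
	 * resuming from where the previous tick stopped (last_monitored_tx_qid)
	 * so all IO queues are covered round-robin across several ticks.
	 */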
3746 	for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
3747 		tx_ring = &adapter->tx_ring[i];
3748 		rx_ring = &adapter->rx_ring[i];
3749 
3750 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3751 		if (unlikely(rc))
3752 			return;
3753 
3754 		rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3755 			check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3756 		if (unlikely(rc))
3757 			return;
3758 
3759 		budget--;
3760 		if (!budget)
3761 			break;
3762 	}
3763 
3764 	adapter->last_monitored_tx_qid = i % io_queue_count;
3765 }
3766 
3767 /* trigger napi schedule after 2 consecutive detections */
3768 #define EMPTY_RX_REFILL 2
3769 /* For the rare case where the device runs out of Rx descriptors and the
3770  * napi handler failed to refill new Rx descriptors (due to a lack of memory
3771  * for example).
3772  * This case will lead to a deadlock:
3773  * The device won't send interrupts since all the new Rx packets will be dropped
3774  * The napi handler won't allocate new Rx descriptors, so the device won't be
3775  * able to deliver new packets to the host.
3776  *
3777  * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3778  * It is recommended to have at least 512MB, with a minimum of 128MB for
3779  * constrained environments.
3780  *
3781  * When such a situation is detected - Reschedule napi
3782  */
3783 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3784 {
3785 	struct ena_ring *rx_ring;
3786 	int i, refill_required;
3787 
3788 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3789 		return;
3790 
3791 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3792 		return;
3793 
3794 	for (i = 0; i < adapter->num_io_queues; i++) {
3795 		rx_ring = &adapter->rx_ring[i];
3796 
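		/* When all but one descriptor are free, treat the Rx ring as
		 * empty: the device has effectively run out of buffers to
		 * place incoming packets into.
		 */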
3797 		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3798 		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3799 			rx_ring->empty_rx_queue++;
3800 
3801 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3802 				u64_stats_update_begin(&rx_ring->syncp);
3803 				rx_ring->rx_stats.empty_rx_ring++;
3804 				u64_stats_update_end(&rx_ring->syncp);
3805 
3806 				netif_err(adapter, drv, adapter->netdev,
3807 					  "trigger refill for ring %d\n", i);
3808 
3809 				napi_schedule(rx_ring->napi);
3810 				rx_ring->empty_rx_queue = 0;
3811 			}
3812 		} else {
3813 			rx_ring->empty_rx_queue = 0;
3814 		}
3815 	}
3816 }
3817 
3818 /* Check for keep alive expiration */
3819 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3820 {
3821 	unsigned long keep_alive_expired;
3822 
3823 	if (!adapter->wd_state)
3824 		return;
3825 
3826 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3827 		return;
3828 
3829 	keep_alive_expired = adapter->last_keep_alive_jiffies +
3830 			     adapter->keep_alive_timeout;
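	/* time_is_before_jiffies() is wrap-around safe, so the comparison stays
	 * correct even if jiffies overflows between keep-alive events.
	 */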
3831 	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3832 		netif_err(adapter, drv, adapter->netdev,
3833 			  "Keep alive watchdog timeout.\n");
3834 		u64_stats_update_begin(&adapter->syncp);
3835 		adapter->dev_stats.wd_expired++;
3836 		u64_stats_update_end(&adapter->syncp);
3837 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3838 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3839 	}
3840 }
3841 
3842 static void check_for_admin_com_state(struct ena_adapter *adapter)
3843 {
3844 	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3845 		netif_err(adapter, drv, adapter->netdev,
3846 			  "ENA admin queue is not in running state!\n");
3847 		u64_stats_update_begin(&adapter->syncp);
3848 		adapter->dev_stats.admin_q_pause++;
3849 		u64_stats_update_end(&adapter->syncp);
3850 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3851 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3852 	}
3853 }
3854 
3855 static void ena_update_hints(struct ena_adapter *adapter,
3856 			     struct ena_admin_ena_hw_hints *hints)
3857 {
3858 	struct net_device *netdev = adapter->netdev;
3859 
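	/* The device provides the hints in msec. The admin completion and MMIO
	 * read timeouts are converted to usec (hence the * 1000 below); the
	 * remaining timeouts are converted to jiffies.
	 */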
3860 	if (hints->admin_completion_tx_timeout)
3861 		adapter->ena_dev->admin_queue.completion_timeout =
3862 			hints->admin_completion_tx_timeout * 1000;
3863 
3864 	if (hints->mmio_read_timeout)
3865 		/* convert to usec */
3866 		adapter->ena_dev->mmio_read.reg_read_to =
3867 			hints->mmio_read_timeout * 1000;
3868 
3869 	if (hints->missed_tx_completion_count_threshold_to_reset)
3870 		adapter->missing_tx_completion_threshold =
3871 			hints->missed_tx_completion_count_threshold_to_reset;
3872 
3873 	if (hints->missing_tx_completion_timeout) {
3874 		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3875 			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3876 		else
3877 			adapter->missing_tx_completion_to =
3878 				msecs_to_jiffies(hints->missing_tx_completion_timeout);
3879 	}
3880 
3881 	if (hints->netdev_wd_timeout)
3882 		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3883 
3884 	if (hints->driver_watchdog_timeout) {
3885 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3886 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3887 		else
3888 			adapter->keep_alive_timeout =
3889 				msecs_to_jiffies(hints->driver_watchdog_timeout);
3890 	}
3891 }
3892 
3893 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3894 				 struct net_device *netdev)
3895 {
3896 	host_info->supported_network_features[0] =
3897 		netdev->features & GENMASK_ULL(31, 0);
3898 	host_info->supported_network_features[1] =
3899 		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
3900 }
3901 
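/* Periodic health check, re-armed roughly once per second. It verifies the
 * keep-alive watchdog, the admin queue state, missing Tx/Rx completions and
 * empty Rx rings, refreshes the host info and debug area, and queues the
 * reset worker instead of re-arming itself when a reset has been requested.
 */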
3902 static void ena_timer_service(struct timer_list *t)
3903 {
3904 	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3905 	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3906 	struct ena_admin_host_info *host_info =
3907 		adapter->ena_dev->host_attr.host_info;
3908 
3909 	check_for_missing_keep_alive(adapter);
3910 
3911 	check_for_admin_com_state(adapter);
3912 
3913 	check_for_missing_completions(adapter);
3914 
3915 	check_for_empty_rx_ring(adapter);
3916 
3917 	if (debug_area)
3918 		ena_dump_stats_to_buf(adapter, debug_area);
3919 
3920 	if (host_info)
3921 		ena_update_host_info(host_info, adapter->netdev);
3922 
3923 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3924 		netif_err(adapter, drv, adapter->netdev,
3925 			  "Trigger reset is on\n");
3926 		ena_dump_stats_to_dmesg(adapter);
3927 		queue_work(ena_wq, &adapter->reset_task);
3928 		return;
3929 	}
3930 
3931 	/* Reset the timer */
3932 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3933 }
3934 
3935 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
3936 				     struct ena_com_dev *ena_dev,
3937 				     struct ena_com_dev_get_features_ctx *get_feat_ctx)
3938 {
3939 	u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
3940 
3941 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3942 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3943 			&get_feat_ctx->max_queue_ext.max_queue_ext;
3944 		io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
3945 				  max_queue_ext->max_rx_cq_num);
3946 
3947 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3948 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3949 	} else {
3950 		struct ena_admin_queue_feature_desc *max_queues =
3951 			&get_feat_ctx->max_queues;
3952 		io_tx_sq_num = max_queues->max_sq_num;
3953 		io_tx_cq_num = max_queues->max_cq_num;
3954 		io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
3955 	}
3956 
3957 	/* In case of LLQ use the llq fields for the tx SQ/CQ */
3958 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3959 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3960 
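	/* The resulting queue count is the minimum of the driver cap
	 * (ENA_MAX_NUM_IO_QUEUES), the number of online CPUs, the device's
	 * Rx/Tx SQ/CQ limits, and the MSI-X vectors left after reserving one
	 * for management.
	 */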
3961 	max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3962 	max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3963 	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3964 	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
3965 	/* 1 IRQ is reserved for mgmnt and 1 IRQ serves each IO queue (Tx/Rx pair) */
3966 	max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3967 	if (unlikely(!max_num_io_queues)) {
3968 		dev_err(&pdev->dev, "The device doesn't have io queues\n");
3969 		return 0; /* the u32 return type cannot carry a negative errno;
3969		   * the caller treats a zero queue count as an error
3969		   */
3970 	}
3971 
3972 	return max_num_io_queues;
3973 }
3974 
3975 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3976 				 struct net_device *netdev)
3977 {
3978 	netdev_features_t dev_features = 0;
3979 
3980 	/* Set offload features */
3981 	if (feat->offload.tx &
3982 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3983 		dev_features |= NETIF_F_IP_CSUM;
3984 
3985 	if (feat->offload.tx &
3986 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3987 		dev_features |= NETIF_F_IPV6_CSUM;
3988 
3989 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3990 		dev_features |= NETIF_F_TSO;
3991 
3992 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3993 		dev_features |= NETIF_F_TSO6;
3994 
3995 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3996 		dev_features |= NETIF_F_TSO_ECN;
3997 
3998 	if (feat->offload.rx_supported &
3999 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
4000 		dev_features |= NETIF_F_RXCSUM;
4001 
4002 	if (feat->offload.rx_supported &
4003 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
4004 		dev_features |= NETIF_F_RXCSUM;
4005 
4006 	netdev->features =
4007 		dev_features |
4008 		NETIF_F_SG |
4009 		NETIF_F_RXHASH |
4010 		NETIF_F_HIGHDMA;
4011 
4012 	netdev->hw_features |= netdev->features;
4013 	netdev->vlan_features |= netdev->features;
4014 }
4015 
4016 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
4017 				     struct ena_com_dev_get_features_ctx *feat)
4018 {
4019 	struct net_device *netdev = adapter->netdev;
4020 
4021 	/* Copy mac address */
4022 	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
4023 		eth_hw_addr_random(netdev);
4024 		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
4025 	} else {
4026 		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
4027 		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4028 	}
4029 
4030 	/* Set offload features */
4031 	ena_set_dev_offloads(feat, netdev);
4032 
4033 	adapter->max_mtu = feat->dev_attr.max_mtu;
4034 	netdev->max_mtu = adapter->max_mtu;
4035 	netdev->min_mtu = ENA_MIN_MTU;
4036 }
4037 
4038 static int ena_rss_init_default(struct ena_adapter *adapter)
4039 {
4040 	struct ena_com_dev *ena_dev = adapter->ena_dev;
4041 	struct device *dev = &adapter->pdev->dev;
4042 	int rc, i;
4043 	u32 val;
4044 
4045 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
4046 	if (unlikely(rc)) {
4047 		dev_err(dev, "Cannot init indirect table\n");
4048 		goto err_rss_init;
4049 	}
4050 
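	/* Spread the indirection table entries over the IO Rx queues using the
	 * standard ethtool default distribution (round-robin over
	 * num_io_queues).
	 */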
4051 	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
4052 		val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
4053 		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4054 						       ENA_IO_RXQ_IDX(val));
4055 		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4056 			dev_err(dev, "Cannot fill indirect table\n");
4057 			goto err_fill_indir;
4058 		}
4059 	}
4060 
4061 	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
4062 					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
4063 	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4064 		dev_err(dev, "Cannot fill hash function\n");
4065 		goto err_fill_indir;
4066 	}
4067 
4068 	rc = ena_com_set_default_hash_ctrl(ena_dev);
4069 	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4070 		dev_err(dev, "Cannot fill hash control\n");
4071 		goto err_fill_indir;
4072 	}
4073 
4074 	return 0;
4075 
4076 err_fill_indir:
4077 	ena_com_rss_destroy(ena_dev);
4078 err_rss_init:
4079 
4080 	return rc;
4081 }
4082 
4083 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4084 {
4085 	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4086 
4087 	pci_release_selected_regions(pdev, release_bars);
4088 }
4089 
4091 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
4092 {
4093 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4094 	struct ena_com_dev *ena_dev = ctx->ena_dev;
4095 	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4096 	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4097 	u32 max_tx_queue_size;
4098 	u32 max_rx_queue_size;
4099 
4100 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4101 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4102 			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4103 		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4104 					  max_queue_ext->max_rx_sq_depth);
4105 		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
4106 
4107 		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4108 			max_tx_queue_size = min_t(u32, max_tx_queue_size,
4109 						  llq->max_llq_depth);
4110 		else
4111 			max_tx_queue_size = min_t(u32, max_tx_queue_size,
4112 						  max_queue_ext->max_tx_sq_depth);
4113 
4114 		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4115 					     max_queue_ext->max_per_packet_tx_descs);
4116 		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4117 					     max_queue_ext->max_per_packet_rx_descs);
4118 	} else {
4119 		struct ena_admin_queue_feature_desc *max_queues =
4120 			&ctx->get_feat_ctx->max_queues;
4121 		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4122 					  max_queues->max_sq_depth);
4123 		max_tx_queue_size = max_queues->max_cq_depth;
4124 
4125 		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4126 			max_tx_queue_size = min_t(u32, max_tx_queue_size,
4127 						  llq->max_llq_depth);
4128 		else
4129 			max_tx_queue_size = min_t(u32, max_tx_queue_size,
4130 						  max_queues->max_sq_depth);
4131 
4132 		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4133 					     max_queues->max_packet_tx_descs);
4134 		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4135 					     max_queues->max_packet_rx_descs);
4136 	}
4137 
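	/* Ring sizes are kept as powers of two: round the device limits down,
	 * clamp the default sizes into [ENA_MIN_RING_SIZE, max] and round the
	 * result down again so the requested sizes are powers of two as well.
	 */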
4138 	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4139 	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
4140 
4141 	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4142 				  max_tx_queue_size);
4143 	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4144 				  max_rx_queue_size);
4145 
4146 	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4147 	rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4148 
4149 	ctx->max_tx_queue_size = max_tx_queue_size;
4150 	ctx->max_rx_queue_size = max_rx_queue_size;
4151 	ctx->tx_queue_size = tx_queue_size;
4152 	ctx->rx_queue_size = rx_queue_size;
4153 
4154 	return 0;
4155 }
4156 
4157 /* ena_probe - Device Initialization Routine
4158  * @pdev: PCI device information struct
4159  * @ent: entry in ena_pci_tbl
4160  *
4161  * Returns 0 on success, negative on failure
4162  *
4163  * ena_probe initializes an adapter identified by a pci_dev structure.
4164  * It performs the OS initialization, configures the adapter private
4165  * structure, and resets the hardware.
4166  */
4167 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4168 {
4169 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
4170 	struct ena_com_dev_get_features_ctx get_feat_ctx;
4171 	struct ena_com_dev *ena_dev = NULL;
4172 	struct ena_adapter *adapter;
4173 	struct net_device *netdev;
4174 	static int adapters_found;
4175 	u32 max_num_io_queues;
4176 	char *queue_type_str;
4177 	bool wd_state;
4178 	int bars, rc;
4179 
4180 	dev_dbg(&pdev->dev, "%s\n", __func__);
4181 
4182 	rc = pci_enable_device_mem(pdev);
4183 	if (rc) {
4184 		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4185 		return rc;
4186 	}
4187 
4188 	pci_set_master(pdev);
4189 
4190 	ena_dev = vzalloc(sizeof(*ena_dev));
4191 	if (!ena_dev) {
4192 		rc = -ENOMEM;
4193 		goto err_disable_device;
4194 	}
4195 
4196 	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4197 	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4198 	if (rc) {
4199 		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4200 			rc);
4201 		goto err_free_ena_dev;
4202 	}
4203 
4204 	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4205 					pci_resource_start(pdev, ENA_REG_BAR),
4206 					pci_resource_len(pdev, ENA_REG_BAR));
4207 	if (!ena_dev->reg_bar) {
4208 		dev_err(&pdev->dev, "failed to remap regs bar\n");
4209 		rc = -EFAULT;
4210 		goto err_free_region;
4211 	}
4212 
4213 	ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
4214 
4215 	ena_dev->dmadev = &pdev->dev;
4216 
4217 	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4218 	if (rc) {
4219 		dev_err(&pdev->dev, "ena device init failed\n");
4220 		if (rc == -ETIME)
4221 			rc = -EPROBE_DEFER;
4222 		goto err_free_region;
4223 	}
4224 
4225 	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
4226 	if (rc) {
4227 		dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
4228 		goto err_device_destroy;
4229 	}
4230 
4231 	calc_queue_ctx.ena_dev = ena_dev;
4232 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4233 	calc_queue_ctx.pdev = pdev;
4234 
4235 	/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4236 	 * Updated during device initialization with the real granularity
4237 	 */
4238 	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
4239 	ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
4240 	ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
4241 	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4242 	rc = ena_calc_io_queue_size(&calc_queue_ctx);
4243 	if (rc || !max_num_io_queues) {
4244 		rc = -EFAULT;
4245 		goto err_device_destroy;
4246 	}
4247 
4248 	/* dev zeroed in alloc_etherdev_mq */
4249 	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
4250 	if (!netdev) {
4251 		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4252 		rc = -ENOMEM;
4253 		goto err_device_destroy;
4254 	}
4255 
4256 	SET_NETDEV_DEV(netdev, &pdev->dev);
4257 
4258 	adapter = netdev_priv(netdev);
4259 	pci_set_drvdata(pdev, adapter);
4260 
4261 	adapter->ena_dev = ena_dev;
4262 	adapter->netdev = netdev;
4263 	adapter->pdev = pdev;
4264 
4265 	ena_set_conf_feat_params(adapter, &get_feat_ctx);
4266 
4267 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4268 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
4269 
4270 	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4271 	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
4272 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4273 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
4274 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4275 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
4276 
4277 	adapter->num_io_queues = max_num_io_queues;
4278 	adapter->max_num_io_queues = max_num_io_queues;
4279 	adapter->last_monitored_tx_qid = 0;
4280 
4281 	adapter->xdp_first_ring = 0;
4282 	adapter->xdp_num_queues = 0;
4283 
4284 	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4285 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4286 		adapter->disable_meta_caching =
4287 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
4288 			   BIT(ENA_ADMIN_DISABLE_META_CACHING));
4289 
4290 	adapter->wd_state = wd_state;
4291 
4292 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4293 
4294 	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4295 	if (rc) {
4296 		dev_err(&pdev->dev,
4297 			"Failed to query interrupt moderation feature\n");
4298 		goto err_netdev_destroy;
4299 	}
4300 	ena_init_io_rings(adapter,
4301 			  0,
4302 			  adapter->xdp_num_queues +
4303 			  adapter->num_io_queues);
4304 
4305 	netdev->netdev_ops = &ena_netdev_ops;
4306 	netdev->watchdog_timeo = TX_TIMEOUT;
4307 	ena_set_ethtool_ops(netdev);
4308 
4309 	netdev->priv_flags |= IFF_UNICAST_FLT;
4310 
4311 	u64_stats_init(&adapter->syncp);
4312 
4313 	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4314 	if (rc) {
4315 		dev_err(&pdev->dev,
4316 			"Failed to enable and set the admin interrupts\n");
4317 		goto err_worker_destroy;
4318 	}
4319 	rc = ena_rss_init_default(adapter);
4320 	if (rc && (rc != -EOPNOTSUPP)) {
4321 		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4322 		goto err_free_msix;
4323 	}
4324 
4325 	ena_config_debug_area(adapter);
4326 
4327 	if (!ena_update_hw_stats(adapter))
4328 		adapter->eni_stats_supported = true;
4329 	else
4330 		adapter->eni_stats_supported = false;
4331 
4332 	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4333 
4334 	netif_carrier_off(netdev);
4335 
4336 	rc = register_netdev(netdev);
4337 	if (rc) {
4338 		dev_err(&pdev->dev, "Cannot register net device\n");
4339 		goto err_rss;
4340 	}
4341 
4342 	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4343 
4344 	adapter->last_keep_alive_jiffies = jiffies;
4345 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4346 	adapter->missing_tx_completion_to = TX_TIMEOUT;
4347 	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4348 
4349 	ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4350 
4351 	timer_setup(&adapter->timer_service, ena_timer_service, 0);
4352 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4353 
4354 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
4355 		queue_type_str = "Regular";
4356 	else
4357 		queue_type_str = "Low Latency";
4358 
4359 	dev_info(&pdev->dev,
4360 		 "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
4361 		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4362 		 netdev->dev_addr, queue_type_str);
4363 
4364 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4365 
4366 	adapters_found++;
4367 
4368 	return 0;
4369 
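	/* Error unwind: each label cleans up what was set up before the failing
	 * step and falls through the remaining labels down to
	 * pci_disable_device().
	 */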
4370 err_rss:
4371 	ena_com_delete_debug_area(ena_dev);
4372 	ena_com_rss_destroy(ena_dev);
4373 err_free_msix:
4374 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4375 	/* stop submitting admin commands on a device that was reset */
4376 	ena_com_set_admin_running_state(ena_dev, false);
4377 	ena_free_mgmnt_irq(adapter);
4378 	ena_disable_msix(adapter);
4379 err_worker_destroy:
4380 	del_timer(&adapter->timer_service);
4381 err_netdev_destroy:
4382 	free_netdev(netdev);
4383 err_device_destroy:
4384 	ena_com_delete_host_info(ena_dev);
4385 	ena_com_admin_destroy(ena_dev);
4386 err_free_region:
4387 	ena_release_bars(ena_dev, pdev);
4388 err_free_ena_dev:
4389 	vfree(ena_dev);
4390 err_disable_device:
4391 	pci_disable_device(pdev);
4392 	return rc;
4393 }
4394 
4395 /*****************************************************************************/
4396 
4397 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4398  * @pdev: PCI device information struct
4399  * @shutdown: Is it a shutdown operation? If false, it is a device removal
4400  *
4401  * __ena_shutoff is a helper routine that does the real work on the shutdown and
4402  * removal paths; the difference between those paths is whether to detach or
4403  * unregister the netdevice.
4404  */
4405 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4406 {
4407 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
4408 	struct ena_com_dev *ena_dev;
4409 	struct net_device *netdev;
4410 
4411 	ena_dev = adapter->ena_dev;
4412 	netdev = adapter->netdev;
4413 
4414 #ifdef CONFIG_RFS_ACCEL
4415 	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4416 		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4417 		netdev->rx_cpu_rmap = NULL;
4418 	}
4419 #endif /* CONFIG_RFS_ACCEL */
4420 
4421 	/* Make sure timer and reset routine won't be called after
4422 	 * freeing device resources.
4423 	 */
4424 	del_timer_sync(&adapter->timer_service);
4425 	cancel_work_sync(&adapter->reset_task);
4426 
4427 	rtnl_lock(); /* lock released inside the below if-else block */
4428 	adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
4429 	ena_destroy_device(adapter, true);
4430 	if (shutdown) {
4431 		netif_device_detach(netdev);
4432 		dev_close(netdev);
4433 		rtnl_unlock();
4434 	} else {
4435 		rtnl_unlock();
4436 		unregister_netdev(netdev);
4437 		free_netdev(netdev);
4438 	}
4439 
4440 	ena_com_rss_destroy(ena_dev);
4441 
4442 	ena_com_delete_debug_area(ena_dev);
4443 
4444 	ena_com_delete_host_info(ena_dev);
4445 
4446 	ena_release_bars(ena_dev, pdev);
4447 
4448 	pci_disable_device(pdev);
4449 
4450 	vfree(ena_dev);
4451 }
4452 
4453 /* ena_remove - Device Removal Routine
4454  * @pdev: PCI device information struct
4455  *
4456  * ena_remove is called by the PCI subsystem to alert the driver
4457  * that it should release a PCI device.
4458  */
4459 
4460 static void ena_remove(struct pci_dev *pdev)
4461 {
4462 	__ena_shutoff(pdev, false);
4463 }
4464 
4465 /* ena_shutdown - Device Shutdown Routine
4466  * @pdev: PCI device information struct
4467  *
4468  * ena_shutdown is called by the PCI subsystem to alert the driver that
4469  * a shutdown/reboot (or kexec) is happening and device must be disabled.
4470  */
4471 
4472 static void ena_shutdown(struct pci_dev *pdev)
4473 {
4474 	__ena_shutoff(pdev, true);
4475 }
4476 
4477 /* ena_suspend - PM suspend callback
4478  * @dev_d: Device information struct
4479  */
4480 static int __maybe_unused ena_suspend(struct device *dev_d)
4481 {
4482 	struct pci_dev *pdev = to_pci_dev(dev_d);
4483 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
4484 
4485 	u64_stats_update_begin(&adapter->syncp);
4486 	adapter->dev_stats.suspend++;
4487 	u64_stats_update_end(&adapter->syncp);
4488 
4489 	rtnl_lock();
4490 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4491 		dev_err(&pdev->dev,
4492 			"ignoring device reset request as the device is being suspended\n");
4493 		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4494 	}
4495 	ena_destroy_device(adapter, true);
4496 	rtnl_unlock();
4497 	return 0;
4498 }
4499 
4500 /* ena_resume - PM resume callback
4501  * @dev_d: Device information struct
4502  */
4503 static int __maybe_unused ena_resume(struct device *dev_d)
4504 {
4505 	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
4506 	int rc;
4507 
4508 	u64_stats_update_begin(&adapter->syncp);
4509 	adapter->dev_stats.resume++;
4510 	u64_stats_update_end(&adapter->syncp);
4511 
4512 	rtnl_lock();
4513 	rc = ena_restore_device(adapter);
4514 	rtnl_unlock();
4515 	return rc;
4516 }
4517 
4518 static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
4519 
4520 static struct pci_driver ena_pci_driver = {
4521 	.name		= DRV_MODULE_NAME,
4522 	.id_table	= ena_pci_tbl,
4523 	.probe		= ena_probe,
4524 	.remove		= ena_remove,
4525 	.shutdown	= ena_shutdown,
4526 	.driver.pm	= &ena_pm_ops,
4527 	.sriov_configure = pci_sriov_configure_simple,
4528 };
4529 
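/* ena_wq is a single-threaded workqueue used to run the per-adapter
 * reset_task items queued from ena_timer_service().
 */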
4530 static int __init ena_init(void)
4531 {
4532 	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
4533 	if (!ena_wq) {
4534 		pr_err("Failed to create workqueue\n");
4535 		return -ENOMEM;
4536 	}
4537 
4538 	return pci_register_driver(&ena_pci_driver);
4539 }
4540 
4541 static void __exit ena_cleanup(void)
4542 {
4543 	pci_unregister_driver(&ena_pci_driver);
4544 
4545 	if (ena_wq) {
4546 		destroy_workqueue(ena_wq);
4547 		ena_wq = NULL;
4548 	}
4549 }
4550 
4551 /******************************************************************************
4552  ******************************** AENQ Handlers *******************************
4553  *****************************************************************************/
4554 /* ena_update_on_link_change:
4555  * Notify the network interface about the change in link status
4556  */
4557 static void ena_update_on_link_change(void *adapter_data,
4558 				      struct ena_admin_aenq_entry *aenq_e)
4559 {
4560 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4561 	struct ena_admin_aenq_link_change_desc *aenq_desc =
4562 		(struct ena_admin_aenq_link_change_desc *)aenq_e;
4563 	int status = aenq_desc->flags &
4564 		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4565 
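	/* While a reset is in progress the carrier is left off here;
	 * ena_restore_device() turns it back on if ENA_FLAG_LINK_UP is still
	 * set once the restore completes.
	 */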
4566 	if (status) {
4567 		netdev_dbg(adapter->netdev, "%s\n", __func__);
4568 		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4569 		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
4570 			netif_carrier_on(adapter->netdev);
4571 	} else {
4572 		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4573 		netif_carrier_off(adapter->netdev);
4574 	}
4575 }
4576 
4577 static void ena_keep_alive_wd(void *adapter_data,
4578 			      struct ena_admin_aenq_entry *aenq_e)
4579 {
4580 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4581 	struct ena_admin_aenq_keep_alive_desc *desc;
4582 	u64 rx_drops;
4583 	u64 tx_drops;
4584 
4585 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
4586 	adapter->last_keep_alive_jiffies = jiffies;
4587 
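	/* The drop counters are reported as two 32-bit halves; combine them
	 * into 64-bit values before updating the stats.
	 */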
4588 	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
4589 	tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
4590 
4591 	u64_stats_update_begin(&adapter->syncp);
4592 	/* These stats are accumulated by the device, so the counters indicate
4593 	 * all drops since last reset.
4594 	 */
4595 	adapter->dev_stats.rx_drops = rx_drops;
4596 	adapter->dev_stats.tx_drops = tx_drops;
4597 	u64_stats_update_end(&adapter->syncp);
4598 }
4599 
4600 static void ena_notification(void *adapter_data,
4601 			     struct ena_admin_aenq_entry *aenq_e)
4602 {
4603 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4604 	struct ena_admin_ena_hw_hints *hints;
4605 
4606 	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4607 	     "Invalid group(%x) expected %x\n",
4608 	     aenq_e->aenq_common_desc.group,
4609 	     ENA_ADMIN_NOTIFICATION);
4610 
4611 	switch (aenq_e->aenq_common_desc.syndrom) {
4612 	case ENA_ADMIN_UPDATE_HINTS:
4613 		hints = (struct ena_admin_ena_hw_hints *)
4614 			(&aenq_e->inline_data_w4);
4615 		ena_update_hints(adapter, hints);
4616 		break;
4617 	default:
4618 		netif_err(adapter, drv, adapter->netdev,
4619 			  "Invalid aenq notification syndrome %d\n",
4620 			  aenq_e->aenq_common_desc.syndrom);
4621 	}
4622 }
4623 
4624 /* This handler will be called for an unknown event group or unimplemented handlers */
4625 static void unimplemented_aenq_handler(void *data,
4626 				       struct ena_admin_aenq_entry *aenq_e)
4627 {
4628 	struct ena_adapter *adapter = (struct ena_adapter *)data;
4629 
4630 	netif_err(adapter, drv, adapter->netdev,
4631 		  "Unknown event was received or event with unimplemented handler\n");
4632 }
4633 
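/* Dispatch table registered with the ena_com layer: asynchronous event
 * notifications (AENQ) are routed to the handler matching their group, and
 * groups without an entry fall back to unimplemented_aenq_handler().
 */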
4634 static struct ena_aenq_handlers aenq_handlers = {
4635 	.handlers = {
4636 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4637 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
4638 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4639 	},
4640 	.unimplemented_handler = unimplemented_aenq_handler
4641 };
4642 
4643 module_init(ena_init);
4644 module_exit(ena_cleanup);
4645