1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #ifdef CONFIG_RFS_ACCEL
36 #include <linux/cpu_rmap.h>
37 #endif /* CONFIG_RFS_ACCEL */
38 #include <linux/ethtool.h>
39 #include <linux/if_vlan.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <linux/numa.h>
44 #include <linux/pci.h>
45 #include <linux/utsname.h>
46 #include <linux/version.h>
47 #include <linux/vmalloc.h>
48 #include <net/ip.h>
49 
50 #include "ena_netdev.h"
51 #include "ena_pci_id_tbl.h"
52 
53 static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
54 
55 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
56 MODULE_DESCRIPTION(DEVICE_NAME);
57 MODULE_LICENSE("GPL");
58 MODULE_VERSION(DRV_MODULE_VERSION);
59 
60 /* Time in jiffies before concluding the transmitter is hung. */
61 #define TX_TIMEOUT  (5 * HZ)
62 
63 #define ENA_NAPI_BUDGET 64
64 
65 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
66 		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
67 static int debug = -1;
68 module_param(debug, int, 0);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70 
71 static struct ena_aenq_handlers aenq_handlers;
72 
73 static struct workqueue_struct *ena_wq;
74 
75 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 
77 static int ena_rss_init_default(struct ena_adapter *adapter);
78 static void check_for_admin_com_state(struct ena_adapter *adapter);
79 static void ena_destroy_device(struct ena_adapter *adapter);
80 static int ena_restore_device(struct ena_adapter *adapter);
81 
82 static void ena_tx_timeout(struct net_device *dev)
83 {
84 	struct ena_adapter *adapter = netdev_priv(dev);
85 
86 	/* Change the state of the device to trigger reset
87 	 * Check that we are not in the middle or a trigger already
88 	 */
89 
90 	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
91 		return;
92 
93 	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
94 	u64_stats_update_begin(&adapter->syncp);
95 	adapter->dev_stats.tx_timeout++;
96 	u64_stats_update_end(&adapter->syncp);
97 
98 	netif_err(adapter, tx_err, dev, "Transmit time out\n");
99 }
100 
101 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
102 {
103 	int i;
104 
105 	for (i = 0; i < adapter->num_queues; i++)
106 		adapter->rx_ring[i].mtu = mtu;
107 }
108 
109 static int ena_change_mtu(struct net_device *dev, int new_mtu)
110 {
111 	struct ena_adapter *adapter = netdev_priv(dev);
112 	int ret;
113 
114 	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
115 	if (!ret) {
116 		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
117 		update_rx_ring_mtu(adapter, new_mtu);
118 		dev->mtu = new_mtu;
119 	} else {
120 		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
121 			  new_mtu);
122 	}
123 
124 	return ret;
125 }
126 
127 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
128 {
129 #ifdef CONFIG_RFS_ACCEL
130 	u32 i;
131 	int rc;
132 
133 	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
134 	if (!adapter->netdev->rx_cpu_rmap)
135 		return -ENOMEM;
136 	for (i = 0; i < adapter->num_queues; i++) {
137 		int irq_idx = ENA_IO_IRQ_IDX(i);
138 
139 		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
140 				      pci_irq_vector(adapter->pdev, irq_idx));
141 		if (rc) {
142 			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
143 			adapter->netdev->rx_cpu_rmap = NULL;
144 			return rc;
145 		}
146 	}
147 #endif /* CONFIG_RFS_ACCEL */
148 	return 0;
149 }
150 
151 static void ena_init_io_rings_common(struct ena_adapter *adapter,
152 				     struct ena_ring *ring, u16 qid)
153 {
154 	ring->qid = qid;
155 	ring->pdev = adapter->pdev;
156 	ring->dev = &adapter->pdev->dev;
157 	ring->netdev = adapter->netdev;
158 	ring->napi = &adapter->ena_napi[qid].napi;
159 	ring->adapter = adapter;
160 	ring->ena_dev = adapter->ena_dev;
161 	ring->per_napi_packets = 0;
162 	ring->per_napi_bytes = 0;
163 	ring->cpu = 0;
164 	ring->first_interrupt = false;
165 	ring->no_interrupt_event_cnt = 0;
166 	u64_stats_init(&ring->syncp);
167 }
168 
169 static void ena_init_io_rings(struct ena_adapter *adapter)
170 {
171 	struct ena_com_dev *ena_dev;
172 	struct ena_ring *txr, *rxr;
173 	int i;
174 
175 	ena_dev = adapter->ena_dev;
176 
177 	for (i = 0; i < adapter->num_queues; i++) {
178 		txr = &adapter->tx_ring[i];
179 		rxr = &adapter->rx_ring[i];
180 
181 		/* TX/RX common ring state */
182 		ena_init_io_rings_common(adapter, txr, i);
183 		ena_init_io_rings_common(adapter, rxr, i);
184 
185 		/* TX specific ring state */
186 		txr->ring_size = adapter->tx_ring_size;
187 		txr->tx_max_header_size = ena_dev->tx_max_header_size;
188 		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
189 		txr->sgl_size = adapter->max_tx_sgl_size;
190 		txr->smoothed_interval =
191 			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
192 
193 		/* RX specific ring state */
194 		rxr->ring_size = adapter->rx_ring_size;
195 		rxr->rx_copybreak = adapter->rx_copybreak;
196 		rxr->sgl_size = adapter->max_rx_sgl_size;
197 		rxr->smoothed_interval =
198 			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
199 		rxr->empty_rx_queue = 0;
200 	}
201 }
202 
203 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
204  * @adapter: network interface device structure
205  * @qid: queue index
206  *
207  * Return 0 on success, negative on failure
208  */
209 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
210 {
211 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
212 	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
213 	int size, i, node;
214 
215 	if (tx_ring->tx_buffer_info) {
216 		netif_err(adapter, ifup,
217 			  adapter->netdev, "tx_buffer_info info is not NULL");
218 		return -EEXIST;
219 	}
220 
221 	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
222 	node = cpu_to_node(ena_irq->cpu);
223 
224 	tx_ring->tx_buffer_info = vzalloc_node(size, node);
225 	if (!tx_ring->tx_buffer_info) {
226 		tx_ring->tx_buffer_info = vzalloc(size);
227 		if (!tx_ring->tx_buffer_info)
228 			return -ENOMEM;
229 	}
230 
231 	size = sizeof(u16) * tx_ring->ring_size;
232 	tx_ring->free_tx_ids = vzalloc_node(size, node);
233 	if (!tx_ring->free_tx_ids) {
234 		tx_ring->free_tx_ids = vzalloc(size);
235 		if (!tx_ring->free_tx_ids) {
236 			vfree(tx_ring->tx_buffer_info);
237 			return -ENOMEM;
238 		}
239 	}
240 
241 	/* Req id ring for TX out of order completions */
242 	for (i = 0; i < tx_ring->ring_size; i++)
243 		tx_ring->free_tx_ids[i] = i;
244 
245 	/* Reset tx statistics */
246 	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
247 
248 	tx_ring->next_to_use = 0;
249 	tx_ring->next_to_clean = 0;
250 	tx_ring->cpu = ena_irq->cpu;
251 	return 0;
252 }
253 
254 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
255  * @adapter: network interface device structure
256  * @qid: queue index
257  *
258  * Free all transmit software resources
259  */
260 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
261 {
262 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
263 
264 	vfree(tx_ring->tx_buffer_info);
265 	tx_ring->tx_buffer_info = NULL;
266 
267 	vfree(tx_ring->free_tx_ids);
268 	tx_ring->free_tx_ids = NULL;
269 }
270 
271 /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
272  * @adapter: private structure
273  *
274  * Return 0 on success, negative on failure
275  */
276 static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
277 {
278 	int i, rc = 0;
279 
280 	for (i = 0; i < adapter->num_queues; i++) {
281 		rc = ena_setup_tx_resources(adapter, i);
282 		if (rc)
283 			goto err_setup_tx;
284 	}
285 
286 	return 0;
287 
288 err_setup_tx:
289 
290 	netif_err(adapter, ifup, adapter->netdev,
291 		  "Tx queue %d: allocation failed\n", i);
292 
293 	/* rewind the index freeing the rings as we go */
294 	while (i--)
295 		ena_free_tx_resources(adapter, i);
296 	return rc;
297 }
298 
299 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
300  * @adapter: board private structure
301  *
302  * Free all transmit software resources
303  */
304 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
305 {
306 	int i;
307 
308 	for (i = 0; i < adapter->num_queues; i++)
309 		ena_free_tx_resources(adapter, i);
310 }
311 
312 static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
313 {
314 	if (likely(req_id < rx_ring->ring_size))
315 		return 0;
316 
317 	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
318 		  "Invalid rx req_id: %hu\n", req_id);
319 
320 	u64_stats_update_begin(&rx_ring->syncp);
321 	rx_ring->rx_stats.bad_req_id++;
322 	u64_stats_update_end(&rx_ring->syncp);
323 
324 	/* Trigger device reset */
325 	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
326 	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
327 	return -EFAULT;
328 }
329 
330 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
331  * @adapter: network interface device structure
332  * @qid: queue index
333  *
334  * Returns 0 on success, negative on failure
335  */
336 static int ena_setup_rx_resources(struct ena_adapter *adapter,
337 				  u32 qid)
338 {
339 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
340 	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
341 	int size, node, i;
342 
343 	if (rx_ring->rx_buffer_info) {
344 		netif_err(adapter, ifup, adapter->netdev,
345 			  "rx_buffer_info is not NULL");
346 		return -EEXIST;
347 	}
348 
349 	/* alloc extra element so in rx path
350 	 * we can always prefetch rx_info + 1
351 	 */
352 	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
353 	node = cpu_to_node(ena_irq->cpu);
354 
355 	rx_ring->rx_buffer_info = vzalloc_node(size, node);
356 	if (!rx_ring->rx_buffer_info) {
357 		rx_ring->rx_buffer_info = vzalloc(size);
358 		if (!rx_ring->rx_buffer_info)
359 			return -ENOMEM;
360 	}
361 
362 	size = sizeof(u16) * rx_ring->ring_size;
363 	rx_ring->free_rx_ids = vzalloc_node(size, node);
364 	if (!rx_ring->free_rx_ids) {
365 		rx_ring->free_rx_ids = vzalloc(size);
366 		if (!rx_ring->free_rx_ids) {
367 			vfree(rx_ring->rx_buffer_info);
368 			return -ENOMEM;
369 		}
370 	}
371 
372 	/* Req id ring for receiving RX pkts out of order */
373 	for (i = 0; i < rx_ring->ring_size; i++)
374 		rx_ring->free_rx_ids[i] = i;
375 
376 	/* Reset rx statistics */
377 	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
378 
379 	rx_ring->next_to_clean = 0;
380 	rx_ring->next_to_use = 0;
381 	rx_ring->cpu = ena_irq->cpu;
382 
383 	return 0;
384 }
385 
386 /* ena_free_rx_resources - Free I/O Rx Resources
387  * @adapter: network interface device structure
388  * @qid: queue index
389  *
390  * Free all receive software resources
391  */
392 static void ena_free_rx_resources(struct ena_adapter *adapter,
393 				  u32 qid)
394 {
395 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
396 
397 	vfree(rx_ring->rx_buffer_info);
398 	rx_ring->rx_buffer_info = NULL;
399 
400 	vfree(rx_ring->free_rx_ids);
401 	rx_ring->free_rx_ids = NULL;
402 }
403 
404 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
405  * @adapter: board private structure
406  *
407  * Return 0 on success, negative on failure
408  */
409 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
410 {
411 	int i, rc = 0;
412 
413 	for (i = 0; i < adapter->num_queues; i++) {
414 		rc = ena_setup_rx_resources(adapter, i);
415 		if (rc)
416 			goto err_setup_rx;
417 	}
418 
419 	return 0;
420 
421 err_setup_rx:
422 
423 	netif_err(adapter, ifup, adapter->netdev,
424 		  "Rx queue %d: allocation failed\n", i);
425 
426 	/* rewind the index freeing the rings as we go */
427 	while (i--)
428 		ena_free_rx_resources(adapter, i);
429 	return rc;
430 }
431 
432 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
433  * @adapter: board private structure
434  *
435  * Free all receive software resources
436  */
437 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
438 {
439 	int i;
440 
441 	for (i = 0; i < adapter->num_queues; i++)
442 		ena_free_rx_resources(adapter, i);
443 }
444 
445 static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
446 				    struct ena_rx_buffer *rx_info, gfp_t gfp)
447 {
448 	struct ena_com_buf *ena_buf;
449 	struct page *page;
450 	dma_addr_t dma;
451 
452 	/* if previous allocated page is not used */
453 	if (unlikely(rx_info->page))
454 		return 0;
455 
456 	page = alloc_page(gfp);
457 	if (unlikely(!page)) {
458 		u64_stats_update_begin(&rx_ring->syncp);
459 		rx_ring->rx_stats.page_alloc_fail++;
460 		u64_stats_update_end(&rx_ring->syncp);
461 		return -ENOMEM;
462 	}
463 
464 	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
465 			   DMA_FROM_DEVICE);
466 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
467 		u64_stats_update_begin(&rx_ring->syncp);
468 		rx_ring->rx_stats.dma_mapping_err++;
469 		u64_stats_update_end(&rx_ring->syncp);
470 
471 		__free_page(page);
472 		return -EIO;
473 	}
474 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
475 		  "alloc page %p, rx_info %p\n", page, rx_info);
476 
477 	rx_info->page = page;
478 	rx_info->page_offset = 0;
479 	ena_buf = &rx_info->ena_buf;
480 	ena_buf->paddr = dma;
481 	ena_buf->len = PAGE_SIZE;
482 
483 	return 0;
484 }
485 
486 static void ena_free_rx_page(struct ena_ring *rx_ring,
487 			     struct ena_rx_buffer *rx_info)
488 {
489 	struct page *page = rx_info->page;
490 	struct ena_com_buf *ena_buf = &rx_info->ena_buf;
491 
492 	if (unlikely(!page)) {
493 		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
494 			   "Trying to free unallocated buffer\n");
495 		return;
496 	}
497 
498 	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
499 		       DMA_FROM_DEVICE);
500 
501 	__free_page(page);
502 	rx_info->page = NULL;
503 }
504 
505 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
506 {
507 	u16 next_to_use, req_id;
508 	u32 i;
509 	int rc;
510 
511 	next_to_use = rx_ring->next_to_use;
512 
513 	for (i = 0; i < num; i++) {
514 		struct ena_rx_buffer *rx_info;
515 
516 		req_id = rx_ring->free_rx_ids[next_to_use];
517 		rc = validate_rx_req_id(rx_ring, req_id);
518 		if (unlikely(rc < 0))
519 			break;
520 
521 		rx_info = &rx_ring->rx_buffer_info[req_id];
522 
523 
524 		rc = ena_alloc_rx_page(rx_ring, rx_info,
525 				       GFP_ATOMIC | __GFP_COMP);
526 		if (unlikely(rc < 0)) {
527 			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
528 				   "failed to alloc buffer for rx queue %d\n",
529 				   rx_ring->qid);
530 			break;
531 		}
532 		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
533 						&rx_info->ena_buf,
534 						req_id);
535 		if (unlikely(rc)) {
536 			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
537 				   "failed to add buffer for rx queue %d\n",
538 				   rx_ring->qid);
539 			break;
540 		}
541 		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
542 						   rx_ring->ring_size);
543 	}
544 
545 	if (unlikely(i < num)) {
546 		u64_stats_update_begin(&rx_ring->syncp);
547 		rx_ring->rx_stats.refil_partial++;
548 		u64_stats_update_end(&rx_ring->syncp);
549 		netdev_warn(rx_ring->netdev,
550 			    "refilled rx qid %d with only %d buffers (from %d)\n",
551 			    rx_ring->qid, i, num);
552 	}
553 
554 	if (likely(i)) {
555 		/* Add memory barrier to make sure the desc were written before
556 		 * issue a doorbell
557 		 */
558 		wmb();
559 		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
560 		mmiowb();
561 	}
562 
563 	rx_ring->next_to_use = next_to_use;
564 
565 	return i;
566 }
567 
568 static void ena_free_rx_bufs(struct ena_adapter *adapter,
569 			     u32 qid)
570 {
571 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
572 	u32 i;
573 
574 	for (i = 0; i < rx_ring->ring_size; i++) {
575 		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
576 
577 		if (rx_info->page)
578 			ena_free_rx_page(rx_ring, rx_info);
579 	}
580 }
581 
582 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
583  * @adapter: board private structure
584  *
585  */
586 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
587 {
588 	struct ena_ring *rx_ring;
589 	int i, rc, bufs_num;
590 
591 	for (i = 0; i < adapter->num_queues; i++) {
592 		rx_ring = &adapter->rx_ring[i];
593 		bufs_num = rx_ring->ring_size - 1;
594 		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
595 
596 		if (unlikely(rc != bufs_num))
597 			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
598 				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
599 				   i, rc, bufs_num);
600 	}
601 }
602 
603 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
604 {
605 	int i;
606 
607 	for (i = 0; i < adapter->num_queues; i++)
608 		ena_free_rx_bufs(adapter, i);
609 }
610 
611 /* ena_free_tx_bufs - Free Tx Buffers per Queue
612  * @tx_ring: TX ring for which buffers be freed
613  */
614 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
615 {
616 	bool print_once = true;
617 	u32 i;
618 
619 	for (i = 0; i < tx_ring->ring_size; i++) {
620 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
621 		struct ena_com_buf *ena_buf;
622 		int nr_frags;
623 		int j;
624 
625 		if (!tx_info->skb)
626 			continue;
627 
628 		if (print_once) {
629 			netdev_notice(tx_ring->netdev,
630 				      "free uncompleted tx skb qid %d idx 0x%x\n",
631 				      tx_ring->qid, i);
632 			print_once = false;
633 		} else {
634 			netdev_dbg(tx_ring->netdev,
635 				   "free uncompleted tx skb qid %d idx 0x%x\n",
636 				   tx_ring->qid, i);
637 		}
638 
639 		ena_buf = tx_info->bufs;
640 		dma_unmap_single(tx_ring->dev,
641 				 ena_buf->paddr,
642 				 ena_buf->len,
643 				 DMA_TO_DEVICE);
644 
645 		/* unmap remaining mapped pages */
646 		nr_frags = tx_info->num_of_bufs - 1;
647 		for (j = 0; j < nr_frags; j++) {
648 			ena_buf++;
649 			dma_unmap_page(tx_ring->dev,
650 				       ena_buf->paddr,
651 				       ena_buf->len,
652 				       DMA_TO_DEVICE);
653 		}
654 
655 		dev_kfree_skb_any(tx_info->skb);
656 	}
657 	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
658 						  tx_ring->qid));
659 }
660 
661 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
662 {
663 	struct ena_ring *tx_ring;
664 	int i;
665 
666 	for (i = 0; i < adapter->num_queues; i++) {
667 		tx_ring = &adapter->tx_ring[i];
668 		ena_free_tx_bufs(tx_ring);
669 	}
670 }
671 
672 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
673 {
674 	u16 ena_qid;
675 	int i;
676 
677 	for (i = 0; i < adapter->num_queues; i++) {
678 		ena_qid = ENA_IO_TXQ_IDX(i);
679 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
680 	}
681 }
682 
683 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
684 {
685 	u16 ena_qid;
686 	int i;
687 
688 	for (i = 0; i < adapter->num_queues; i++) {
689 		ena_qid = ENA_IO_RXQ_IDX(i);
690 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
691 	}
692 }
693 
694 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
695 {
696 	ena_destroy_all_tx_queues(adapter);
697 	ena_destroy_all_rx_queues(adapter);
698 }
699 
700 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
701 {
702 	struct ena_tx_buffer *tx_info = NULL;
703 
704 	if (likely(req_id < tx_ring->ring_size)) {
705 		tx_info = &tx_ring->tx_buffer_info[req_id];
706 		if (likely(tx_info->skb))
707 			return 0;
708 	}
709 
710 	if (tx_info)
711 		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
712 			  "tx_info doesn't have valid skb\n");
713 	else
714 		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
715 			  "Invalid req_id: %hu\n", req_id);
716 
717 	u64_stats_update_begin(&tx_ring->syncp);
718 	tx_ring->tx_stats.bad_req_id++;
719 	u64_stats_update_end(&tx_ring->syncp);
720 
721 	/* Trigger device reset */
722 	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
723 	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
724 	return -EFAULT;
725 }
726 
727 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
728 {
729 	struct netdev_queue *txq;
730 	bool above_thresh;
731 	u32 tx_bytes = 0;
732 	u32 total_done = 0;
733 	u16 next_to_clean;
734 	u16 req_id;
735 	int tx_pkts = 0;
736 	int rc;
737 
738 	next_to_clean = tx_ring->next_to_clean;
739 	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
740 
741 	while (tx_pkts < budget) {
742 		struct ena_tx_buffer *tx_info;
743 		struct sk_buff *skb;
744 		struct ena_com_buf *ena_buf;
745 		int i, nr_frags;
746 
747 		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
748 						&req_id);
749 		if (rc)
750 			break;
751 
752 		rc = validate_tx_req_id(tx_ring, req_id);
753 		if (rc)
754 			break;
755 
756 		tx_info = &tx_ring->tx_buffer_info[req_id];
757 		skb = tx_info->skb;
758 
759 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
760 		prefetch(&skb->end);
761 
762 		tx_info->skb = NULL;
763 		tx_info->last_jiffies = 0;
764 
765 		if (likely(tx_info->num_of_bufs != 0)) {
766 			ena_buf = tx_info->bufs;
767 
768 			dma_unmap_single(tx_ring->dev,
769 					 dma_unmap_addr(ena_buf, paddr),
770 					 dma_unmap_len(ena_buf, len),
771 					 DMA_TO_DEVICE);
772 
773 			/* unmap remaining mapped pages */
774 			nr_frags = tx_info->num_of_bufs - 1;
775 			for (i = 0; i < nr_frags; i++) {
776 				ena_buf++;
777 				dma_unmap_page(tx_ring->dev,
778 					       dma_unmap_addr(ena_buf, paddr),
779 					       dma_unmap_len(ena_buf, len),
780 					       DMA_TO_DEVICE);
781 			}
782 		}
783 
784 		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
785 			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
786 			  skb);
787 
788 		tx_bytes += skb->len;
789 		dev_kfree_skb(skb);
790 		tx_pkts++;
791 		total_done += tx_info->tx_descs;
792 
793 		tx_ring->free_tx_ids[next_to_clean] = req_id;
794 		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
795 						     tx_ring->ring_size);
796 	}
797 
798 	tx_ring->next_to_clean = next_to_clean;
799 	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
800 	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
801 
802 	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
803 
804 	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
805 		  "tx_poll: q %d done. total pkts: %d\n",
806 		  tx_ring->qid, tx_pkts);
807 
808 	/* need to make the rings circular update visible to
809 	 * ena_start_xmit() before checking for netif_queue_stopped().
810 	 */
811 	smp_mb();
812 
813 	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
814 		ENA_TX_WAKEUP_THRESH;
815 	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
816 		__netif_tx_lock(txq, smp_processor_id());
817 		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
818 			ENA_TX_WAKEUP_THRESH;
819 		if (netif_tx_queue_stopped(txq) && above_thresh) {
820 			netif_tx_wake_queue(txq);
821 			u64_stats_update_begin(&tx_ring->syncp);
822 			tx_ring->tx_stats.queue_wakeup++;
823 			u64_stats_update_end(&tx_ring->syncp);
824 		}
825 		__netif_tx_unlock(txq);
826 	}
827 
828 	tx_ring->per_napi_bytes += tx_bytes;
829 	tx_ring->per_napi_packets += tx_pkts;
830 
831 	return tx_pkts;
832 }
833 
834 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
835 {
836 	struct sk_buff *skb;
837 
838 	if (frags)
839 		skb = napi_get_frags(rx_ring->napi);
840 	else
841 		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
842 						rx_ring->rx_copybreak);
843 
844 	if (unlikely(!skb)) {
845 		u64_stats_update_begin(&rx_ring->syncp);
846 		rx_ring->rx_stats.skb_alloc_fail++;
847 		u64_stats_update_end(&rx_ring->syncp);
848 		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
849 			  "Failed to allocate skb. frags: %d\n", frags);
850 		return NULL;
851 	}
852 
853 	return skb;
854 }
855 
856 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
857 				  struct ena_com_rx_buf_info *ena_bufs,
858 				  u32 descs,
859 				  u16 *next_to_clean)
860 {
861 	struct sk_buff *skb;
862 	struct ena_rx_buffer *rx_info;
863 	u16 len, req_id, buf = 0;
864 	void *va;
865 
866 	len = ena_bufs[buf].len;
867 	req_id = ena_bufs[buf].req_id;
868 	rx_info = &rx_ring->rx_buffer_info[req_id];
869 
870 	if (unlikely(!rx_info->page)) {
871 		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
872 			  "Page is NULL\n");
873 		return NULL;
874 	}
875 
876 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
877 		  "rx_info %p page %p\n",
878 		  rx_info, rx_info->page);
879 
880 	/* save virt address of first buffer */
881 	va = page_address(rx_info->page) + rx_info->page_offset;
882 	prefetch(va + NET_IP_ALIGN);
883 
884 	if (len <= rx_ring->rx_copybreak) {
885 		skb = ena_alloc_skb(rx_ring, false);
886 		if (unlikely(!skb))
887 			return NULL;
888 
889 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
890 			  "rx allocated small packet. len %d. data_len %d\n",
891 			  skb->len, skb->data_len);
892 
893 		/* sync this buffer for CPU use */
894 		dma_sync_single_for_cpu(rx_ring->dev,
895 					dma_unmap_addr(&rx_info->ena_buf, paddr),
896 					len,
897 					DMA_FROM_DEVICE);
898 		skb_copy_to_linear_data(skb, va, len);
899 		dma_sync_single_for_device(rx_ring->dev,
900 					   dma_unmap_addr(&rx_info->ena_buf, paddr),
901 					   len,
902 					   DMA_FROM_DEVICE);
903 
904 		skb_put(skb, len);
905 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
906 		rx_ring->free_rx_ids[*next_to_clean] = req_id;
907 		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
908 						     rx_ring->ring_size);
909 		return skb;
910 	}
911 
912 	skb = ena_alloc_skb(rx_ring, true);
913 	if (unlikely(!skb))
914 		return NULL;
915 
916 	do {
917 		dma_unmap_page(rx_ring->dev,
918 			       dma_unmap_addr(&rx_info->ena_buf, paddr),
919 			       PAGE_SIZE, DMA_FROM_DEVICE);
920 
921 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
922 				rx_info->page_offset, len, PAGE_SIZE);
923 
924 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
925 			  "rx skb updated. len %d. data_len %d\n",
926 			  skb->len, skb->data_len);
927 
928 		rx_info->page = NULL;
929 
930 		rx_ring->free_rx_ids[*next_to_clean] = req_id;
931 		*next_to_clean =
932 			ENA_RX_RING_IDX_NEXT(*next_to_clean,
933 					     rx_ring->ring_size);
934 		if (likely(--descs == 0))
935 			break;
936 
937 		buf++;
938 		len = ena_bufs[buf].len;
939 		req_id = ena_bufs[buf].req_id;
940 		rx_info = &rx_ring->rx_buffer_info[req_id];
941 	} while (1);
942 
943 	return skb;
944 }
945 
946 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
947  * @adapter: structure containing adapter specific data
948  * @ena_rx_ctx: received packet context/metadata
949  * @skb: skb currently being received and modified
950  */
951 static inline void ena_rx_checksum(struct ena_ring *rx_ring,
952 				   struct ena_com_rx_ctx *ena_rx_ctx,
953 				   struct sk_buff *skb)
954 {
955 	/* Rx csum disabled */
956 	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
957 		skb->ip_summed = CHECKSUM_NONE;
958 		return;
959 	}
960 
961 	/* For fragmented packets the checksum isn't valid */
962 	if (ena_rx_ctx->frag) {
963 		skb->ip_summed = CHECKSUM_NONE;
964 		return;
965 	}
966 
967 	/* if IP and error */
968 	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
969 		     (ena_rx_ctx->l3_csum_err))) {
970 		/* ipv4 checksum error */
971 		skb->ip_summed = CHECKSUM_NONE;
972 		u64_stats_update_begin(&rx_ring->syncp);
973 		rx_ring->rx_stats.bad_csum++;
974 		u64_stats_update_end(&rx_ring->syncp);
975 		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
976 			  "RX IPv4 header checksum error\n");
977 		return;
978 	}
979 
980 	/* if TCP/UDP */
981 	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
982 		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
983 		if (unlikely(ena_rx_ctx->l4_csum_err)) {
984 			/* TCP/UDP checksum error */
985 			u64_stats_update_begin(&rx_ring->syncp);
986 			rx_ring->rx_stats.bad_csum++;
987 			u64_stats_update_end(&rx_ring->syncp);
988 			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
989 				  "RX L4 checksum error\n");
990 			skb->ip_summed = CHECKSUM_NONE;
991 			return;
992 		}
993 
994 		skb->ip_summed = CHECKSUM_UNNECESSARY;
995 	}
996 }
997 
998 static void ena_set_rx_hash(struct ena_ring *rx_ring,
999 			    struct ena_com_rx_ctx *ena_rx_ctx,
1000 			    struct sk_buff *skb)
1001 {
1002 	enum pkt_hash_types hash_type;
1003 
1004 	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1005 		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1006 			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1007 
1008 			hash_type = PKT_HASH_TYPE_L4;
1009 		else
1010 			hash_type = PKT_HASH_TYPE_NONE;
1011 
1012 		/* Override hash type if the packet is fragmented */
1013 		if (ena_rx_ctx->frag)
1014 			hash_type = PKT_HASH_TYPE_NONE;
1015 
1016 		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1017 	}
1018 }
1019 
1020 /* ena_clean_rx_irq - Cleanup RX irq
1021  * @rx_ring: RX ring to clean
1022  * @napi: napi handler
1023  * @budget: how many packets driver is allowed to clean
1024  *
1025  * Returns the number of cleaned buffers.
1026  */
1027 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1028 			    u32 budget)
1029 {
1030 	u16 next_to_clean = rx_ring->next_to_clean;
1031 	u32 res_budget, work_done;
1032 
1033 	struct ena_com_rx_ctx ena_rx_ctx;
1034 	struct ena_adapter *adapter;
1035 	struct sk_buff *skb;
1036 	int refill_required;
1037 	int refill_threshold;
1038 	int rc = 0;
1039 	int total_len = 0;
1040 	int rx_copybreak_pkt = 0;
1041 	int i;
1042 
1043 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1044 		  "%s qid %d\n", __func__, rx_ring->qid);
1045 	res_budget = budget;
1046 
1047 	do {
1048 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1049 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1050 		ena_rx_ctx.descs = 0;
1051 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1052 				    rx_ring->ena_com_io_sq,
1053 				    &ena_rx_ctx);
1054 		if (unlikely(rc))
1055 			goto error;
1056 
1057 		if (unlikely(ena_rx_ctx.descs == 0))
1058 			break;
1059 
1060 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1061 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1062 			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1063 			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1064 
1065 		/* allocate skb and fill it */
1066 		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
1067 				 &next_to_clean);
1068 
1069 		/* exit if we failed to retrieve a buffer */
1070 		if (unlikely(!skb)) {
1071 			for (i = 0; i < ena_rx_ctx.descs; i++) {
1072 				rx_ring->free_tx_ids[next_to_clean] =
1073 					rx_ring->ena_bufs[i].req_id;
1074 				next_to_clean =
1075 					ENA_RX_RING_IDX_NEXT(next_to_clean,
1076 							     rx_ring->ring_size);
1077 			}
1078 			break;
1079 		}
1080 
1081 		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1082 
1083 		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1084 
1085 		skb_record_rx_queue(skb, rx_ring->qid);
1086 
1087 		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
1088 			total_len += rx_ring->ena_bufs[0].len;
1089 			rx_copybreak_pkt++;
1090 			napi_gro_receive(napi, skb);
1091 		} else {
1092 			total_len += skb->len;
1093 			napi_gro_frags(napi);
1094 		}
1095 
1096 		res_budget--;
1097 	} while (likely(res_budget));
1098 
1099 	work_done = budget - res_budget;
1100 	rx_ring->per_napi_bytes += total_len;
1101 	rx_ring->per_napi_packets += work_done;
1102 	u64_stats_update_begin(&rx_ring->syncp);
1103 	rx_ring->rx_stats.bytes += total_len;
1104 	rx_ring->rx_stats.cnt += work_done;
1105 	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1106 	u64_stats_update_end(&rx_ring->syncp);
1107 
1108 	rx_ring->next_to_clean = next_to_clean;
1109 
1110 	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
1111 	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
1112 
1113 	/* Optimization, try to batch new rx buffers */
1114 	if (refill_required > refill_threshold) {
1115 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1116 		ena_refill_rx_bufs(rx_ring, refill_required);
1117 	}
1118 
1119 	return work_done;
1120 
1121 error:
1122 	adapter = netdev_priv(rx_ring->netdev);
1123 
1124 	u64_stats_update_begin(&rx_ring->syncp);
1125 	rx_ring->rx_stats.bad_desc_num++;
1126 	u64_stats_update_end(&rx_ring->syncp);
1127 
1128 	/* Too many desc from the device. Trigger reset */
1129 	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
1130 	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
1131 
1132 	return 0;
1133 }
1134 
1135 inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
1136 				       struct ena_ring *tx_ring)
1137 {
1138 	/* We apply adaptive moderation on Rx path only.
1139 	 * Tx uses static interrupt moderation.
1140 	 */
1141 	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
1142 					  rx_ring->per_napi_packets,
1143 					  rx_ring->per_napi_bytes,
1144 					  &rx_ring->smoothed_interval,
1145 					  &rx_ring->moder_tbl_idx);
1146 
1147 	/* Reset per napi packets/bytes */
1148 	tx_ring->per_napi_packets = 0;
1149 	tx_ring->per_napi_bytes = 0;
1150 	rx_ring->per_napi_packets = 0;
1151 	rx_ring->per_napi_bytes = 0;
1152 }
1153 
1154 static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
1155 					struct ena_ring *rx_ring)
1156 {
1157 	struct ena_eth_io_intr_reg intr_reg;
1158 
1159 	/* Update intr register: rx intr delay,
1160 	 * tx intr delay and interrupt unmask
1161 	 */
1162 	ena_com_update_intr_reg(&intr_reg,
1163 				rx_ring->smoothed_interval,
1164 				tx_ring->smoothed_interval,
1165 				true);
1166 
1167 	/* It is a shared MSI-X.
1168 	 * Tx and Rx CQ have pointer to it.
1169 	 * So we use one of them to reach the intr reg
1170 	 */
1171 	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
1172 }
1173 
1174 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1175 					     struct ena_ring *rx_ring)
1176 {
1177 	int cpu = get_cpu();
1178 	int numa_node;
1179 
1180 	/* Check only one ring since the 2 rings are running on the same cpu */
1181 	if (likely(tx_ring->cpu == cpu))
1182 		goto out;
1183 
1184 	numa_node = cpu_to_node(cpu);
1185 	put_cpu();
1186 
1187 	if (numa_node != NUMA_NO_NODE) {
1188 		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1189 		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
1190 	}
1191 
1192 	tx_ring->cpu = cpu;
1193 	rx_ring->cpu = cpu;
1194 
1195 	return;
1196 out:
1197 	put_cpu();
1198 }
1199 
1200 static int ena_io_poll(struct napi_struct *napi, int budget)
1201 {
1202 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1203 	struct ena_ring *tx_ring, *rx_ring;
1204 
1205 	u32 tx_work_done;
1206 	u32 rx_work_done;
1207 	int tx_budget;
1208 	int napi_comp_call = 0;
1209 	int ret;
1210 
1211 	tx_ring = ena_napi->tx_ring;
1212 	rx_ring = ena_napi->rx_ring;
1213 
1214 	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1215 
1216 	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1217 	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1218 		napi_complete_done(napi, 0);
1219 		return 0;
1220 	}
1221 
1222 	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1223 	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1224 
1225 	/* If the device is about to reset or down, avoid unmask
1226 	 * the interrupt and return 0 so NAPI won't reschedule
1227 	 */
1228 	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1229 		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1230 		napi_complete_done(napi, 0);
1231 		ret = 0;
1232 
1233 	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1234 		napi_comp_call = 1;
1235 
1236 		/* Update numa and unmask the interrupt only when schedule
1237 		 * from the interrupt context (vs from sk_busy_loop)
1238 		 */
1239 		if (napi_complete_done(napi, rx_work_done)) {
1240 			/* Tx and Rx share the same interrupt vector */
1241 			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1242 				ena_adjust_intr_moderation(rx_ring, tx_ring);
1243 
1244 			ena_unmask_interrupt(tx_ring, rx_ring);
1245 		}
1246 
1247 		ena_update_ring_numa_node(tx_ring, rx_ring);
1248 
1249 		ret = rx_work_done;
1250 	} else {
1251 		ret = budget;
1252 	}
1253 
1254 	u64_stats_update_begin(&tx_ring->syncp);
1255 	tx_ring->tx_stats.napi_comp += napi_comp_call;
1256 	tx_ring->tx_stats.tx_poll++;
1257 	u64_stats_update_end(&tx_ring->syncp);
1258 
1259 	return ret;
1260 }
1261 
1262 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1263 {
1264 	struct ena_adapter *adapter = (struct ena_adapter *)data;
1265 
1266 	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1267 
1268 	/* Don't call the aenq handler before probe is done */
1269 	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1270 		ena_com_aenq_intr_handler(adapter->ena_dev, data);
1271 
1272 	return IRQ_HANDLED;
1273 }
1274 
1275 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1276  * @irq: interrupt number
1277  * @data: pointer to a network interface private napi device structure
1278  */
1279 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1280 {
1281 	struct ena_napi *ena_napi = data;
1282 
1283 	ena_napi->tx_ring->first_interrupt = true;
1284 	ena_napi->rx_ring->first_interrupt = true;
1285 
1286 	napi_schedule_irqoff(&ena_napi->napi);
1287 
1288 	return IRQ_HANDLED;
1289 }
1290 
1291 /* Reserve a single MSI-X vector for management (admin + aenq).
1292  * plus reserve one vector for each potential io queue.
1293  * the number of potential io queues is the minimum of what the device
1294  * supports and the number of vCPUs.
1295  */
1296 static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
1297 {
1298 	int msix_vecs, irq_cnt;
1299 
1300 	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1301 		netif_err(adapter, probe, adapter->netdev,
1302 			  "Error, MSI-X is already enabled\n");
1303 		return -EPERM;
1304 	}
1305 
1306 	/* Reserved the max msix vectors we might need */
1307 	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
1308 
1309 	netif_dbg(adapter, probe, adapter->netdev,
1310 		  "trying to enable MSI-X, vectors %d\n", msix_vecs);
1311 
1312 	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1313 					msix_vecs, PCI_IRQ_MSIX);
1314 
1315 	if (irq_cnt < 0) {
1316 		netif_err(adapter, probe, adapter->netdev,
1317 			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1318 		return -ENOSPC;
1319 	}
1320 
1321 	if (irq_cnt != msix_vecs) {
1322 		netif_notice(adapter, probe, adapter->netdev,
1323 			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
1324 			     irq_cnt, msix_vecs);
1325 		adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1326 	}
1327 
1328 	if (ena_init_rx_cpu_rmap(adapter))
1329 		netif_warn(adapter, probe, adapter->netdev,
1330 			   "Failed to map IRQs to CPUs\n");
1331 
1332 	adapter->msix_vecs = irq_cnt;
1333 	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1334 
1335 	return 0;
1336 }
1337 
1338 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1339 {
1340 	u32 cpu;
1341 
1342 	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1343 		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1344 		 pci_name(adapter->pdev));
1345 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
1346 		ena_intr_msix_mgmnt;
1347 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1348 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1349 		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1350 	cpu = cpumask_first(cpu_online_mask);
1351 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
1352 	cpumask_set_cpu(cpu,
1353 			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
1354 }
1355 
1356 static void ena_setup_io_intr(struct ena_adapter *adapter)
1357 {
1358 	struct net_device *netdev;
1359 	int irq_idx, i, cpu;
1360 
1361 	netdev = adapter->netdev;
1362 
1363 	for (i = 0; i < adapter->num_queues; i++) {
1364 		irq_idx = ENA_IO_IRQ_IDX(i);
1365 		cpu = i % num_online_cpus();
1366 
1367 		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1368 			 "%s-Tx-Rx-%d", netdev->name, i);
1369 		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
1370 		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
1371 		adapter->irq_tbl[irq_idx].vector =
1372 			pci_irq_vector(adapter->pdev, irq_idx);
1373 		adapter->irq_tbl[irq_idx].cpu = cpu;
1374 
1375 		cpumask_set_cpu(cpu,
1376 				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
1377 	}
1378 }
1379 
1380 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
1381 {
1382 	unsigned long flags = 0;
1383 	struct ena_irq *irq;
1384 	int rc;
1385 
1386 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1387 	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1388 			 irq->data);
1389 	if (rc) {
1390 		netif_err(adapter, probe, adapter->netdev,
1391 			  "failed to request admin irq\n");
1392 		return rc;
1393 	}
1394 
1395 	netif_dbg(adapter, probe, adapter->netdev,
1396 		  "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
1397 		  irq->affinity_hint_mask.bits[0], irq->vector);
1398 
1399 	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1400 
1401 	return rc;
1402 }
1403 
1404 static int ena_request_io_irq(struct ena_adapter *adapter)
1405 {
1406 	unsigned long flags = 0;
1407 	struct ena_irq *irq;
1408 	int rc = 0, i, k;
1409 
1410 	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1411 		netif_err(adapter, ifup, adapter->netdev,
1412 			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
1413 		return -EINVAL;
1414 	}
1415 
1416 	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1417 		irq = &adapter->irq_tbl[i];
1418 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1419 				 irq->data);
1420 		if (rc) {
1421 			netif_err(adapter, ifup, adapter->netdev,
1422 				  "Failed to request I/O IRQ. index %d rc %d\n",
1423 				   i, rc);
1424 			goto err;
1425 		}
1426 
1427 		netif_dbg(adapter, ifup, adapter->netdev,
1428 			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
1429 			  i, irq->affinity_hint_mask.bits[0], irq->vector);
1430 
1431 		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1432 	}
1433 
1434 	return rc;
1435 
1436 err:
1437 	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
1438 		irq = &adapter->irq_tbl[k];
1439 		free_irq(irq->vector, irq->data);
1440 	}
1441 
1442 	return rc;
1443 }
1444 
1445 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
1446 {
1447 	struct ena_irq *irq;
1448 
1449 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1450 	synchronize_irq(irq->vector);
1451 	irq_set_affinity_hint(irq->vector, NULL);
1452 	free_irq(irq->vector, irq->data);
1453 }
1454 
1455 static void ena_free_io_irq(struct ena_adapter *adapter)
1456 {
1457 	struct ena_irq *irq;
1458 	int i;
1459 
1460 #ifdef CONFIG_RFS_ACCEL
1461 	if (adapter->msix_vecs >= 1) {
1462 		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
1463 		adapter->netdev->rx_cpu_rmap = NULL;
1464 	}
1465 #endif /* CONFIG_RFS_ACCEL */
1466 
1467 	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1468 		irq = &adapter->irq_tbl[i];
1469 		irq_set_affinity_hint(irq->vector, NULL);
1470 		free_irq(irq->vector, irq->data);
1471 	}
1472 }
1473 
1474 static void ena_disable_msix(struct ena_adapter *adapter)
1475 {
1476 	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
1477 		pci_free_irq_vectors(adapter->pdev);
1478 }
1479 
1480 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
1481 {
1482 	int i;
1483 
1484 	if (!netif_running(adapter->netdev))
1485 		return;
1486 
1487 	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
1488 		synchronize_irq(adapter->irq_tbl[i].vector);
1489 }
1490 
1491 static void ena_del_napi(struct ena_adapter *adapter)
1492 {
1493 	int i;
1494 
1495 	for (i = 0; i < adapter->num_queues; i++)
1496 		netif_napi_del(&adapter->ena_napi[i].napi);
1497 }
1498 
1499 static void ena_init_napi(struct ena_adapter *adapter)
1500 {
1501 	struct ena_napi *napi;
1502 	int i;
1503 
1504 	for (i = 0; i < adapter->num_queues; i++) {
1505 		napi = &adapter->ena_napi[i];
1506 
1507 		netif_napi_add(adapter->netdev,
1508 			       &adapter->ena_napi[i].napi,
1509 			       ena_io_poll,
1510 			       ENA_NAPI_BUDGET);
1511 		napi->rx_ring = &adapter->rx_ring[i];
1512 		napi->tx_ring = &adapter->tx_ring[i];
1513 		napi->qid = i;
1514 	}
1515 }
1516 
1517 static void ena_napi_disable_all(struct ena_adapter *adapter)
1518 {
1519 	int i;
1520 
1521 	for (i = 0; i < adapter->num_queues; i++)
1522 		napi_disable(&adapter->ena_napi[i].napi);
1523 }
1524 
1525 static void ena_napi_enable_all(struct ena_adapter *adapter)
1526 {
1527 	int i;
1528 
1529 	for (i = 0; i < adapter->num_queues; i++)
1530 		napi_enable(&adapter->ena_napi[i].napi);
1531 }
1532 
1533 static void ena_restore_ethtool_params(struct ena_adapter *adapter)
1534 {
1535 	adapter->tx_usecs = 0;
1536 	adapter->rx_usecs = 0;
1537 	adapter->tx_frames = 1;
1538 	adapter->rx_frames = 1;
1539 }
1540 
1541 /* Configure the Rx forwarding */
1542 static int ena_rss_configure(struct ena_adapter *adapter)
1543 {
1544 	struct ena_com_dev *ena_dev = adapter->ena_dev;
1545 	int rc;
1546 
1547 	/* In case the RSS table wasn't initialized by probe */
1548 	if (!ena_dev->rss.tbl_log_size) {
1549 		rc = ena_rss_init_default(adapter);
1550 		if (rc && (rc != -EOPNOTSUPP)) {
1551 			netif_err(adapter, ifup, adapter->netdev,
1552 				  "Failed to init RSS rc: %d\n", rc);
1553 			return rc;
1554 		}
1555 	}
1556 
1557 	/* Set indirect table */
1558 	rc = ena_com_indirect_table_set(ena_dev);
1559 	if (unlikely(rc && rc != -EOPNOTSUPP))
1560 		return rc;
1561 
1562 	/* Configure hash function (if supported) */
1563 	rc = ena_com_set_hash_function(ena_dev);
1564 	if (unlikely(rc && (rc != -EOPNOTSUPP)))
1565 		return rc;
1566 
1567 	/* Configure hash inputs (if supported) */
1568 	rc = ena_com_set_hash_ctrl(ena_dev);
1569 	if (unlikely(rc && (rc != -EOPNOTSUPP)))
1570 		return rc;
1571 
1572 	return 0;
1573 }
1574 
1575 static int ena_up_complete(struct ena_adapter *adapter)
1576 {
1577 	int rc;
1578 
1579 	rc = ena_rss_configure(adapter);
1580 	if (rc)
1581 		return rc;
1582 
1583 	ena_init_napi(adapter);
1584 
1585 	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1586 
1587 	ena_refill_all_rx_bufs(adapter);
1588 
1589 	/* enable transmits */
1590 	netif_tx_start_all_queues(adapter->netdev);
1591 
1592 	ena_restore_ethtool_params(adapter);
1593 
1594 	ena_napi_enable_all(adapter);
1595 
1596 	return 0;
1597 }
1598 
1599 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1600 {
1601 	struct ena_com_create_io_ctx ctx = { 0 };
1602 	struct ena_com_dev *ena_dev;
1603 	struct ena_ring *tx_ring;
1604 	u32 msix_vector;
1605 	u16 ena_qid;
1606 	int rc;
1607 
1608 	ena_dev = adapter->ena_dev;
1609 
1610 	tx_ring = &adapter->tx_ring[qid];
1611 	msix_vector = ENA_IO_IRQ_IDX(qid);
1612 	ena_qid = ENA_IO_TXQ_IDX(qid);
1613 
1614 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1615 	ctx.qid = ena_qid;
1616 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1617 	ctx.msix_vector = msix_vector;
1618 	ctx.queue_size = adapter->tx_ring_size;
1619 	ctx.numa_node = cpu_to_node(tx_ring->cpu);
1620 
1621 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1622 	if (rc) {
1623 		netif_err(adapter, ifup, adapter->netdev,
1624 			  "Failed to create I/O TX queue num %d rc: %d\n",
1625 			  qid, rc);
1626 		return rc;
1627 	}
1628 
1629 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1630 				     &tx_ring->ena_com_io_sq,
1631 				     &tx_ring->ena_com_io_cq);
1632 	if (rc) {
1633 		netif_err(adapter, ifup, adapter->netdev,
1634 			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1635 			  qid, rc);
1636 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1637 		return rc;
1638 	}
1639 
1640 	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
1641 	return rc;
1642 }
1643 
1644 static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
1645 {
1646 	struct ena_com_dev *ena_dev = adapter->ena_dev;
1647 	int rc, i;
1648 
1649 	for (i = 0; i < adapter->num_queues; i++) {
1650 		rc = ena_create_io_tx_queue(adapter, i);
1651 		if (rc)
1652 			goto create_err;
1653 	}
1654 
1655 	return 0;
1656 
1657 create_err:
1658 	while (i--)
1659 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1660 
1661 	return rc;
1662 }
1663 
1664 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1665 {
1666 	struct ena_com_dev *ena_dev;
1667 	struct ena_com_create_io_ctx ctx = { 0 };
1668 	struct ena_ring *rx_ring;
1669 	u32 msix_vector;
1670 	u16 ena_qid;
1671 	int rc;
1672 
1673 	ena_dev = adapter->ena_dev;
1674 
1675 	rx_ring = &adapter->rx_ring[qid];
1676 	msix_vector = ENA_IO_IRQ_IDX(qid);
1677 	ena_qid = ENA_IO_RXQ_IDX(qid);
1678 
1679 	ctx.qid = ena_qid;
1680 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1681 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1682 	ctx.msix_vector = msix_vector;
1683 	ctx.queue_size = adapter->rx_ring_size;
1684 	ctx.numa_node = cpu_to_node(rx_ring->cpu);
1685 
1686 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1687 	if (rc) {
1688 		netif_err(adapter, ifup, adapter->netdev,
1689 			  "Failed to create I/O RX queue num %d rc: %d\n",
1690 			  qid, rc);
1691 		return rc;
1692 	}
1693 
1694 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1695 				     &rx_ring->ena_com_io_sq,
1696 				     &rx_ring->ena_com_io_cq);
1697 	if (rc) {
1698 		netif_err(adapter, ifup, adapter->netdev,
1699 			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1700 			  qid, rc);
1701 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1702 		return rc;
1703 	}
1704 
1705 	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
1706 
1707 	return rc;
1708 }
1709 
1710 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
1711 {
1712 	struct ena_com_dev *ena_dev = adapter->ena_dev;
1713 	int rc, i;
1714 
1715 	for (i = 0; i < adapter->num_queues; i++) {
1716 		rc = ena_create_io_rx_queue(adapter, i);
1717 		if (rc)
1718 			goto create_err;
1719 	}
1720 
1721 	return 0;
1722 
1723 create_err:
1724 	while (i--)
1725 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1726 
1727 	return rc;
1728 }
1729 
1730 static int ena_up(struct ena_adapter *adapter)
1731 {
1732 	int rc, i;
1733 
1734 	netdev_dbg(adapter->netdev, "%s\n", __func__);
1735 
1736 	ena_setup_io_intr(adapter);
1737 
1738 	rc = ena_request_io_irq(adapter);
1739 	if (rc)
1740 		goto err_req_irq;
1741 
1742 	/* allocate transmit descriptors */
1743 	rc = ena_setup_all_tx_resources(adapter);
1744 	if (rc)
1745 		goto err_setup_tx;
1746 
1747 	/* allocate receive descriptors */
1748 	rc = ena_setup_all_rx_resources(adapter);
1749 	if (rc)
1750 		goto err_setup_rx;
1751 
1752 	/* Create TX queues */
1753 	rc = ena_create_all_io_tx_queues(adapter);
1754 	if (rc)
1755 		goto err_create_tx_queues;
1756 
1757 	/* Create RX queues */
1758 	rc = ena_create_all_io_rx_queues(adapter);
1759 	if (rc)
1760 		goto err_create_rx_queues;
1761 
1762 	rc = ena_up_complete(adapter);
1763 	if (rc)
1764 		goto err_up;
1765 
1766 	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1767 		netif_carrier_on(adapter->netdev);
1768 
1769 	u64_stats_update_begin(&adapter->syncp);
1770 	adapter->dev_stats.interface_up++;
1771 	u64_stats_update_end(&adapter->syncp);
1772 
1773 	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1774 
1775 	/* Enable completion queues interrupt */
1776 	for (i = 0; i < adapter->num_queues; i++)
1777 		ena_unmask_interrupt(&adapter->tx_ring[i],
1778 				     &adapter->rx_ring[i]);
1779 
1780 	/* schedule napi in case we had pending packets
1781 	 * from the last time we disable napi
1782 	 */
1783 	for (i = 0; i < adapter->num_queues; i++)
1784 		napi_schedule(&adapter->ena_napi[i].napi);
1785 
1786 	return rc;
1787 
1788 err_up:
1789 	ena_destroy_all_rx_queues(adapter);
1790 err_create_rx_queues:
1791 	ena_destroy_all_tx_queues(adapter);
1792 err_create_tx_queues:
1793 	ena_free_all_io_rx_resources(adapter);
1794 err_setup_rx:
1795 	ena_free_all_io_tx_resources(adapter);
1796 err_setup_tx:
1797 	ena_free_io_irq(adapter);
1798 err_req_irq:
1799 
1800 	return rc;
1801 }
1802 
1803 static void ena_down(struct ena_adapter *adapter)
1804 {
1805 	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
1806 
1807 	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1808 
1809 	u64_stats_update_begin(&adapter->syncp);
1810 	adapter->dev_stats.interface_down++;
1811 	u64_stats_update_end(&adapter->syncp);
1812 
1813 	netif_carrier_off(adapter->netdev);
1814 	netif_tx_disable(adapter->netdev);
1815 
1816 	/* After this point the napi handler won't enable the tx queue */
1817 	ena_napi_disable_all(adapter);
1818 
1819 	/* After destroy the queue there won't be any new interrupts */
1820 
1821 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
1822 		int rc;
1823 
1824 		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1825 		if (rc)
1826 			dev_err(&adapter->pdev->dev, "Device reset failed\n");
1827 	}
1828 
1829 	ena_destroy_all_io_queues(adapter);
1830 
1831 	ena_disable_io_intr_sync(adapter);
1832 	ena_free_io_irq(adapter);
1833 	ena_del_napi(adapter);
1834 
1835 	ena_free_all_tx_bufs(adapter);
1836 	ena_free_all_rx_bufs(adapter);
1837 	ena_free_all_io_tx_resources(adapter);
1838 	ena_free_all_io_rx_resources(adapter);
1839 }
1840 
1841 /* ena_open - Called when a network interface is made active
1842  * @netdev: network interface device structure
1843  *
1844  * Returns 0 on success, negative value on failure
1845  *
1846  * The open entry point is called when a network interface is made
1847  * active by the system (IFF_UP).  At this point all resources needed
1848  * for transmit and receive operations are allocated, the interrupt
1849  * handler is registered with the OS, the watchdog timer is started,
1850  * and the stack is notified that the interface is ready.
1851  */
1852 static int ena_open(struct net_device *netdev)
1853 {
1854 	struct ena_adapter *adapter = netdev_priv(netdev);
1855 	int rc;
1856 
1857 	/* Notify the stack of the actual queue counts. */
1858 	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
1859 	if (rc) {
1860 		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
1861 		return rc;
1862 	}
1863 
1864 	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
1865 	if (rc) {
1866 		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
1867 		return rc;
1868 	}
1869 
1870 	rc = ena_up(adapter);
1871 	if (rc)
1872 		return rc;
1873 
1874 	return rc;
1875 }
1876 
1877 /* ena_close - Disables a network interface
1878  * @netdev: network interface device structure
1879  *
1880  * Returns 0, this is not allowed to fail
1881  *
1882  * The close entry point is called when an interface is de-activated
1883  * by the OS.  The hardware is still under the drivers control, but
1884  * needs to be disabled.  A global MAC reset is issued to stop the
1885  * hardware, and all transmit and receive resources are freed.
1886  */
1887 static int ena_close(struct net_device *netdev)
1888 {
1889 	struct ena_adapter *adapter = netdev_priv(netdev);
1890 
1891 	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1892 
1893 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1894 		ena_down(adapter);
1895 
1896 	/* Check for device status and issue reset if needed*/
1897 	check_for_admin_com_state(adapter);
1898 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1899 		netif_err(adapter, ifdown, adapter->netdev,
1900 			  "Destroy failure, restarting device\n");
1901 		ena_dump_stats_to_dmesg(adapter);
1902 		/* rtnl lock already obtained in dev_ioctl() layer */
1903 		ena_destroy_device(adapter);
1904 		ena_restore_device(adapter);
1905 	}
1906 
1907 	return 0;
1908 }
1909 
1910 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
1911 {
1912 	u32 mss = skb_shinfo(skb)->gso_size;
1913 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
1914 	u8 l4_protocol = 0;
1915 
1916 	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
1917 		ena_tx_ctx->l4_csum_enable = 1;
1918 		if (mss) {
1919 			ena_tx_ctx->tso_enable = 1;
1920 			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
1921 			ena_tx_ctx->l4_csum_partial = 0;
1922 		} else {
1923 			ena_tx_ctx->tso_enable = 0;
1924 			ena_meta->l4_hdr_len = 0;
1925 			ena_tx_ctx->l4_csum_partial = 1;
1926 		}
1927 
1928 		switch (ip_hdr(skb)->version) {
1929 		case IPVERSION:
1930 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
1931 			if (ip_hdr(skb)->frag_off & htons(IP_DF))
1932 				ena_tx_ctx->df = 1;
1933 			if (mss)
1934 				ena_tx_ctx->l3_csum_enable = 1;
1935 			l4_protocol = ip_hdr(skb)->protocol;
1936 			break;
1937 		case 6:
1938 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
1939 			l4_protocol = ipv6_hdr(skb)->nexthdr;
1940 			break;
1941 		default:
1942 			break;
1943 		}
1944 
1945 		if (l4_protocol == IPPROTO_TCP)
1946 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
1947 		else
1948 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
1949 
1950 		ena_meta->mss = mss;
1951 		ena_meta->l3_hdr_len = skb_network_header_len(skb);
1952 		ena_meta->l3_hdr_offset = skb_network_offset(skb);
1953 		ena_tx_ctx->meta_valid = 1;
1954 
1955 	} else {
1956 		ena_tx_ctx->meta_valid = 0;
1957 	}
1958 }
1959 
1960 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
1961 				       struct sk_buff *skb)
1962 {
1963 	int num_frags, header_len, rc;
1964 
1965 	num_frags = skb_shinfo(skb)->nr_frags;
1966 	header_len = skb_headlen(skb);
1967 
1968 	if (num_frags < tx_ring->sgl_size)
1969 		return 0;
1970 
1971 	if ((num_frags == tx_ring->sgl_size) &&
1972 	    (header_len < tx_ring->tx_max_header_size))
1973 		return 0;
1974 
1975 	u64_stats_update_begin(&tx_ring->syncp);
1976 	tx_ring->tx_stats.linearize++;
1977 	u64_stats_update_end(&tx_ring->syncp);
1978 
1979 	rc = skb_linearize(skb);
1980 	if (unlikely(rc)) {
1981 		u64_stats_update_begin(&tx_ring->syncp);
1982 		tx_ring->tx_stats.linearize_failed++;
1983 		u64_stats_update_end(&tx_ring->syncp);
1984 	}
1985 
1986 	return rc;
1987 }
1988 
1989 /* Called with netif_tx_lock. */
1990 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
1991 {
1992 	struct ena_adapter *adapter = netdev_priv(dev);
1993 	struct ena_tx_buffer *tx_info;
1994 	struct ena_com_tx_ctx ena_tx_ctx;
1995 	struct ena_ring *tx_ring;
1996 	struct netdev_queue *txq;
1997 	struct ena_com_buf *ena_buf;
1998 	void *push_hdr;
1999 	u32 len, last_frag;
2000 	u16 next_to_use;
2001 	u16 req_id;
2002 	u16 push_len;
2003 	u16 header_len;
2004 	dma_addr_t dma;
2005 	int qid, rc, nb_hw_desc;
2006 	int i = -1;
2007 
2008 	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2009 	/*  Determine which tx ring we will be placed on */
2010 	qid = skb_get_queue_mapping(skb);
2011 	tx_ring = &adapter->tx_ring[qid];
2012 	txq = netdev_get_tx_queue(dev, qid);
2013 
2014 	rc = ena_check_and_linearize_skb(tx_ring, skb);
2015 	if (unlikely(rc))
2016 		goto error_drop_packet;
2017 
2018 	skb_tx_timestamp(skb);
2019 	len = skb_headlen(skb);
2020 
2021 	next_to_use = tx_ring->next_to_use;
2022 	req_id = tx_ring->free_tx_ids[next_to_use];
2023 	tx_info = &tx_ring->tx_buffer_info[req_id];
2024 	tx_info->num_of_bufs = 0;
2025 
2026 	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2027 	ena_buf = tx_info->bufs;
2028 	tx_info->skb = skb;
2029 
2030 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2031 		/* prepared the push buffer */
2032 		push_len = min_t(u32, len, tx_ring->tx_max_header_size);
2033 		header_len = push_len;
2034 		push_hdr = skb->data;
2035 	} else {
2036 		push_len = 0;
2037 		header_len = min_t(u32, len, tx_ring->tx_max_header_size);
2038 		push_hdr = NULL;
2039 	}
2040 
2041 	netif_dbg(adapter, tx_queued, dev,
2042 		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2043 		  push_hdr, push_len);
2044 
2045 	if (len > push_len) {
2046 		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2047 				     len - push_len, DMA_TO_DEVICE);
2048 		if (dma_mapping_error(tx_ring->dev, dma))
2049 			goto error_report_dma_error;
2050 
2051 		ena_buf->paddr = dma;
2052 		ena_buf->len = len - push_len;
2053 
2054 		ena_buf++;
2055 		tx_info->num_of_bufs++;
2056 	}
2057 
2058 	last_frag = skb_shinfo(skb)->nr_frags;
2059 
2060 	for (i = 0; i < last_frag; i++) {
2061 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2062 
2063 		len = skb_frag_size(frag);
2064 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
2065 				       DMA_TO_DEVICE);
2066 		if (dma_mapping_error(tx_ring->dev, dma))
2067 			goto error_report_dma_error;
2068 
2069 		ena_buf->paddr = dma;
2070 		ena_buf->len = len;
2071 		ena_buf++;
2072 	}
2073 
2074 	tx_info->num_of_bufs += last_frag;
2075 
2076 	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2077 	ena_tx_ctx.ena_bufs = tx_info->bufs;
2078 	ena_tx_ctx.push_header = push_hdr;
2079 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2080 	ena_tx_ctx.req_id = req_id;
2081 	ena_tx_ctx.header_len = header_len;
2082 
2083 	/* set flags and meta data */
2084 	ena_tx_csum(&ena_tx_ctx, skb);
2085 
2086 	/* prepare the packet's descriptors to dma engine */
2087 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2088 				&nb_hw_desc);
2089 
2090 	if (unlikely(rc)) {
2091 		netif_err(adapter, tx_queued, dev,
2092 			  "failed to prepare tx bufs\n");
2093 		u64_stats_update_begin(&tx_ring->syncp);
2094 		tx_ring->tx_stats.queue_stop++;
2095 		tx_ring->tx_stats.prepare_ctx_err++;
2096 		u64_stats_update_end(&tx_ring->syncp);
2097 		netif_tx_stop_queue(txq);
2098 		goto error_unmap_dma;
2099 	}
2100 
2101 	netdev_tx_sent_queue(txq, skb->len);
2102 
2103 	u64_stats_update_begin(&tx_ring->syncp);
2104 	tx_ring->tx_stats.cnt++;
2105 	tx_ring->tx_stats.bytes += skb->len;
2106 	u64_stats_update_end(&tx_ring->syncp);
2107 
2108 	tx_info->tx_descs = nb_hw_desc;
2109 	tx_info->last_jiffies = jiffies;
2110 	tx_info->print_once = 0;
2111 
2112 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2113 		tx_ring->ring_size);
2114 
2115 	/* This WMB is aimed to:
2116 	 * 1 - perform smp barrier before reading next_to_completion
2117 	 * 2 - make sure the desc were written before trigger DB
2118 	 */
2119 	wmb();
2120 
2121 	/* stop the queue when no more space available, the packet can have up
2122 	 * to sgl_size + 2. one for the meta descriptor and one for header
2123 	 * (if the header is larger than tx_max_header_size).
2124 	 */
2125 	if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
2126 		     (tx_ring->sgl_size + 2))) {
2127 		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2128 			  __func__, qid);
2129 
2130 		netif_tx_stop_queue(txq);
2131 		u64_stats_update_begin(&tx_ring->syncp);
2132 		tx_ring->tx_stats.queue_stop++;
2133 		u64_stats_update_end(&tx_ring->syncp);
2134 
2135 		/* There is a rare condition where this function decide to
2136 		 * stop the queue but meanwhile clean_tx_irq updates
2137 		 * next_to_completion and terminates.
2138 		 * The queue will remain stopped forever.
2139 		 * To solve this issue this function perform rmb, check
2140 		 * the wakeup condition and wake up the queue if needed.
2141 		 */
2142 		smp_rmb();
2143 
2144 		if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
2145 				> ENA_TX_WAKEUP_THRESH) {
2146 			netif_tx_wake_queue(txq);
2147 			u64_stats_update_begin(&tx_ring->syncp);
2148 			tx_ring->tx_stats.queue_wakeup++;
2149 			u64_stats_update_end(&tx_ring->syncp);
2150 		}
2151 	}
2152 
2153 	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2154 		/* trigger the dma engine */
2155 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
2156 		u64_stats_update_begin(&tx_ring->syncp);
2157 		tx_ring->tx_stats.doorbells++;
2158 		u64_stats_update_end(&tx_ring->syncp);
2159 	}
2160 
2161 	return NETDEV_TX_OK;
2162 
2163 error_report_dma_error:
2164 	u64_stats_update_begin(&tx_ring->syncp);
2165 	tx_ring->tx_stats.dma_mapping_err++;
2166 	u64_stats_update_end(&tx_ring->syncp);
2167 	netdev_warn(adapter->netdev, "failed to map skb\n");
2168 
2169 	tx_info->skb = NULL;
2170 
2171 error_unmap_dma:
2172 	if (i >= 0) {
2173 		/* save value of frag that failed */
2174 		last_frag = i;
2175 
2176 		/* start back at beginning and unmap skb */
2177 		tx_info->skb = NULL;
2178 		ena_buf = tx_info->bufs;
2179 		dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2180 				 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2181 
2182 		/* unmap remaining mapped pages */
2183 		for (i = 0; i < last_frag; i++) {
2184 			ena_buf++;
2185 			dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2186 				       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2187 		}
2188 	}
2189 
2190 error_drop_packet:
2191 
2192 	dev_kfree_skb(skb);
2193 	return NETDEV_TX_OK;
2194 }
2195 
2196 #ifdef CONFIG_NET_POLL_CONTROLLER
2197 static void ena_netpoll(struct net_device *netdev)
2198 {
2199 	struct ena_adapter *adapter = netdev_priv(netdev);
2200 	int i;
2201 
2202 	/* Dont schedule NAPI if the driver is in the middle of reset
2203 	 * or netdev is down.
2204 	 */
2205 
2206 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
2207 	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2208 		return;
2209 
2210 	for (i = 0; i < adapter->num_queues; i++)
2211 		napi_schedule(&adapter->ena_napi[i].napi);
2212 }
2213 #endif /* CONFIG_NET_POLL_CONTROLLER */
2214 
2215 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2216 			    void *accel_priv, select_queue_fallback_t fallback)
2217 {
2218 	u16 qid;
2219 	/* we suspect that this is good for in--kernel network services that
2220 	 * want to loop incoming skb rx to tx in normal user generated traffic,
2221 	 * most probably we will not get to this
2222 	 */
2223 	if (skb_rx_queue_recorded(skb))
2224 		qid = skb_get_rx_queue(skb);
2225 	else
2226 		qid = fallback(dev, skb);
2227 
2228 	return qid;
2229 }
2230 
2231 static void ena_config_host_info(struct ena_com_dev *ena_dev)
2232 {
2233 	struct ena_admin_host_info *host_info;
2234 	int rc;
2235 
2236 	/* Allocate only the host info */
2237 	rc = ena_com_allocate_host_info(ena_dev);
2238 	if (rc) {
2239 		pr_err("Cannot allocate host info\n");
2240 		return;
2241 	}
2242 
2243 	host_info = ena_dev->host_attr.host_info;
2244 
2245 	host_info->os_type = ENA_ADMIN_OS_LINUX;
2246 	host_info->kernel_ver = LINUX_VERSION_CODE;
2247 	strncpy(host_info->kernel_ver_str, utsname()->version,
2248 		sizeof(host_info->kernel_ver_str) - 1);
2249 	host_info->os_dist = 0;
2250 	strncpy(host_info->os_dist_str, utsname()->release,
2251 		sizeof(host_info->os_dist_str) - 1);
2252 	host_info->driver_version =
2253 		(DRV_MODULE_VER_MAJOR) |
2254 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2255 		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2256 
2257 	rc = ena_com_set_host_attributes(ena_dev);
2258 	if (rc) {
2259 		if (rc == -EOPNOTSUPP)
2260 			pr_warn("Cannot set host attributes\n");
2261 		else
2262 			pr_err("Cannot set host attributes\n");
2263 
2264 		goto err;
2265 	}
2266 
2267 	return;
2268 
2269 err:
2270 	ena_com_delete_host_info(ena_dev);
2271 }
2272 
2273 static void ena_config_debug_area(struct ena_adapter *adapter)
2274 {
2275 	u32 debug_area_size;
2276 	int rc, ss_count;
2277 
2278 	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2279 	if (ss_count <= 0) {
2280 		netif_err(adapter, drv, adapter->netdev,
2281 			  "SS count is negative\n");
2282 		return;
2283 	}
2284 
2285 	/* allocate 32 bytes for each string and 64bit for the value */
2286 	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2287 
2288 	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2289 	if (rc) {
2290 		pr_err("Cannot allocate debug area\n");
2291 		return;
2292 	}
2293 
2294 	rc = ena_com_set_host_attributes(adapter->ena_dev);
2295 	if (rc) {
2296 		if (rc == -EOPNOTSUPP)
2297 			netif_warn(adapter, drv, adapter->netdev,
2298 				   "Cannot set host attributes\n");
2299 		else
2300 			netif_err(adapter, drv, adapter->netdev,
2301 				  "Cannot set host attributes\n");
2302 		goto err;
2303 	}
2304 
2305 	return;
2306 err:
2307 	ena_com_delete_debug_area(adapter->ena_dev);
2308 }
2309 
2310 static void ena_get_stats64(struct net_device *netdev,
2311 			    struct rtnl_link_stats64 *stats)
2312 {
2313 	struct ena_adapter *adapter = netdev_priv(netdev);
2314 	struct ena_ring *rx_ring, *tx_ring;
2315 	unsigned int start;
2316 	u64 rx_drops;
2317 	int i;
2318 
2319 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2320 		return;
2321 
2322 	for (i = 0; i < adapter->num_queues; i++) {
2323 		u64 bytes, packets;
2324 
2325 		tx_ring = &adapter->tx_ring[i];
2326 
2327 		do {
2328 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
2329 			packets = tx_ring->tx_stats.cnt;
2330 			bytes = tx_ring->tx_stats.bytes;
2331 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
2332 
2333 		stats->tx_packets += packets;
2334 		stats->tx_bytes += bytes;
2335 
2336 		rx_ring = &adapter->rx_ring[i];
2337 
2338 		do {
2339 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
2340 			packets = rx_ring->rx_stats.cnt;
2341 			bytes = rx_ring->rx_stats.bytes;
2342 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
2343 
2344 		stats->rx_packets += packets;
2345 		stats->rx_bytes += bytes;
2346 	}
2347 
2348 	do {
2349 		start = u64_stats_fetch_begin_irq(&adapter->syncp);
2350 		rx_drops = adapter->dev_stats.rx_drops;
2351 	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
2352 
2353 	stats->rx_dropped = rx_drops;
2354 
2355 	stats->multicast = 0;
2356 	stats->collisions = 0;
2357 
2358 	stats->rx_length_errors = 0;
2359 	stats->rx_crc_errors = 0;
2360 	stats->rx_frame_errors = 0;
2361 	stats->rx_fifo_errors = 0;
2362 	stats->rx_missed_errors = 0;
2363 	stats->tx_window_errors = 0;
2364 
2365 	stats->rx_errors = 0;
2366 	stats->tx_errors = 0;
2367 }
2368 
2369 static const struct net_device_ops ena_netdev_ops = {
2370 	.ndo_open		= ena_open,
2371 	.ndo_stop		= ena_close,
2372 	.ndo_start_xmit		= ena_start_xmit,
2373 	.ndo_select_queue	= ena_select_queue,
2374 	.ndo_get_stats64	= ena_get_stats64,
2375 	.ndo_tx_timeout		= ena_tx_timeout,
2376 	.ndo_change_mtu		= ena_change_mtu,
2377 	.ndo_set_mac_address	= NULL,
2378 	.ndo_validate_addr	= eth_validate_addr,
2379 #ifdef CONFIG_NET_POLL_CONTROLLER
2380 	.ndo_poll_controller	= ena_netpoll,
2381 #endif /* CONFIG_NET_POLL_CONTROLLER */
2382 };
2383 
2384 static int ena_device_validate_params(struct ena_adapter *adapter,
2385 				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
2386 {
2387 	struct net_device *netdev = adapter->netdev;
2388 	int rc;
2389 
2390 	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2391 			      adapter->mac_addr);
2392 	if (!rc) {
2393 		netif_err(adapter, drv, netdev,
2394 			  "Error, mac address are different\n");
2395 		return -EINVAL;
2396 	}
2397 
2398 	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
2399 	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
2400 		netif_err(adapter, drv, netdev,
2401 			  "Error, device doesn't support enough queues\n");
2402 		return -EINVAL;
2403 	}
2404 
2405 	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2406 		netif_err(adapter, drv, netdev,
2407 			  "Error, device max mtu is smaller than netdev MTU\n");
2408 		return -EINVAL;
2409 	}
2410 
2411 	return 0;
2412 }
2413 
2414 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2415 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
2416 			   bool *wd_state)
2417 {
2418 	struct device *dev = &pdev->dev;
2419 	bool readless_supported;
2420 	u32 aenq_groups;
2421 	int dma_width;
2422 	int rc;
2423 
2424 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
2425 	if (rc) {
2426 		dev_err(dev, "failed to init mmio read less\n");
2427 		return rc;
2428 	}
2429 
2430 	/* The PCIe configuration space revision id indicate if mmio reg
2431 	 * read is disabled
2432 	 */
2433 	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2434 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2435 
2436 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2437 	if (rc) {
2438 		dev_err(dev, "Can not reset device\n");
2439 		goto err_mmio_read_less;
2440 	}
2441 
2442 	rc = ena_com_validate_version(ena_dev);
2443 	if (rc) {
2444 		dev_err(dev, "device version is too low\n");
2445 		goto err_mmio_read_less;
2446 	}
2447 
2448 	dma_width = ena_com_get_dma_width(ena_dev);
2449 	if (dma_width < 0) {
2450 		dev_err(dev, "Invalid dma width value %d", dma_width);
2451 		rc = dma_width;
2452 		goto err_mmio_read_less;
2453 	}
2454 
2455 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2456 	if (rc) {
2457 		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2458 		goto err_mmio_read_less;
2459 	}
2460 
2461 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2462 	if (rc) {
2463 		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
2464 			rc);
2465 		goto err_mmio_read_less;
2466 	}
2467 
2468 	/* ENA admin level init */
2469 	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
2470 	if (rc) {
2471 		dev_err(dev,
2472 			"Can not initialize ena admin queue with device\n");
2473 		goto err_mmio_read_less;
2474 	}
2475 
2476 	/* To enable the msix interrupts the driver needs to know the number
2477 	 * of queues. So the driver uses polling mode to retrieve this
2478 	 * information
2479 	 */
2480 	ena_com_set_admin_polling_mode(ena_dev, true);
2481 
2482 	ena_config_host_info(ena_dev);
2483 
2484 	/* Get Device Attributes*/
2485 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2486 	if (rc) {
2487 		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2488 		goto err_admin_init;
2489 	}
2490 
2491 	/* Try to turn all the available aenq groups */
2492 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2493 		BIT(ENA_ADMIN_FATAL_ERROR) |
2494 		BIT(ENA_ADMIN_WARNING) |
2495 		BIT(ENA_ADMIN_NOTIFICATION) |
2496 		BIT(ENA_ADMIN_KEEP_ALIVE);
2497 
2498 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
2499 
2500 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2501 	if (rc) {
2502 		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
2503 		goto err_admin_init;
2504 	}
2505 
2506 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2507 
2508 	return 0;
2509 
2510 err_admin_init:
2511 	ena_com_delete_host_info(ena_dev);
2512 	ena_com_admin_destroy(ena_dev);
2513 err_mmio_read_less:
2514 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2515 
2516 	return rc;
2517 }
2518 
2519 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2520 						    int io_vectors)
2521 {
2522 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2523 	struct device *dev = &adapter->pdev->dev;
2524 	int rc;
2525 
2526 	rc = ena_enable_msix(adapter, io_vectors);
2527 	if (rc) {
2528 		dev_err(dev, "Can not reserve msix vectors\n");
2529 		return rc;
2530 	}
2531 
2532 	ena_setup_mgmnt_intr(adapter);
2533 
2534 	rc = ena_request_mgmnt_irq(adapter);
2535 	if (rc) {
2536 		dev_err(dev, "Can not setup management interrupts\n");
2537 		goto err_disable_msix;
2538 	}
2539 
2540 	ena_com_set_admin_polling_mode(ena_dev, false);
2541 
2542 	ena_com_admin_aenq_enable(ena_dev);
2543 
2544 	return 0;
2545 
2546 err_disable_msix:
2547 	ena_disable_msix(adapter);
2548 
2549 	return rc;
2550 }
2551 
2552 static void ena_destroy_device(struct ena_adapter *adapter)
2553 {
2554 	struct net_device *netdev = adapter->netdev;
2555 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2556 	bool dev_up;
2557 
2558 	netif_carrier_off(netdev);
2559 
2560 	del_timer_sync(&adapter->timer_service);
2561 
2562 	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2563 	adapter->dev_up_before_reset = dev_up;
2564 
2565 	ena_com_set_admin_running_state(ena_dev, false);
2566 
2567 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2568 		ena_down(adapter);
2569 
2570 	/* Before releasing the ENA resources, a device reset is required.
2571 	 * (to prevent the device from accessing them).
2572 	 * In case the reset flag is set and the device is up, ena_down()
2573 	 * already perform the reset, so it can be skipped.
2574 	 */
2575 	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2576 		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2577 
2578 	ena_free_mgmnt_irq(adapter);
2579 
2580 	ena_disable_msix(adapter);
2581 
2582 	ena_com_abort_admin_commands(ena_dev);
2583 
2584 	ena_com_wait_for_abort_completion(ena_dev);
2585 
2586 	ena_com_admin_destroy(ena_dev);
2587 
2588 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2589 
2590 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2591 
2592 	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2593 }
2594 
2595 static int ena_restore_device(struct ena_adapter *adapter)
2596 {
2597 	struct ena_com_dev_get_features_ctx get_feat_ctx;
2598 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2599 	struct pci_dev *pdev = adapter->pdev;
2600 	bool wd_state;
2601 	int rc;
2602 
2603 	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2604 	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2605 	if (rc) {
2606 		dev_err(&pdev->dev, "Can not initialize device\n");
2607 		goto err;
2608 	}
2609 	adapter->wd_state = wd_state;
2610 
2611 	rc = ena_device_validate_params(adapter, &get_feat_ctx);
2612 	if (rc) {
2613 		dev_err(&pdev->dev, "Validation of device parameters failed\n");
2614 		goto err_device_destroy;
2615 	}
2616 
2617 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2618 	/* Make sure we don't have a race with AENQ Links state handler */
2619 	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2620 		netif_carrier_on(adapter->netdev);
2621 
2622 	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2623 						      adapter->num_queues);
2624 	if (rc) {
2625 		dev_err(&pdev->dev, "Enable MSI-X failed\n");
2626 		goto err_device_destroy;
2627 	}
2628 	/* If the interface was up before the reset bring it up */
2629 	if (adapter->dev_up_before_reset) {
2630 		rc = ena_up(adapter);
2631 		if (rc) {
2632 			dev_err(&pdev->dev, "Failed to create I/O queues\n");
2633 			goto err_disable_msix;
2634 		}
2635 	}
2636 
2637 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2638 	dev_err(&pdev->dev, "Device reset completed successfully\n");
2639 
2640 	return rc;
2641 err_disable_msix:
2642 	ena_free_mgmnt_irq(adapter);
2643 	ena_disable_msix(adapter);
2644 err_device_destroy:
2645 	ena_com_admin_destroy(ena_dev);
2646 err:
2647 	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2648 	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2649 	dev_err(&pdev->dev,
2650 		"Reset attempt failed. Can not reset the device\n");
2651 
2652 	return rc;
2653 }
2654 
2655 static void ena_fw_reset_device(struct work_struct *work)
2656 {
2657 	struct ena_adapter *adapter =
2658 		container_of(work, struct ena_adapter, reset_task);
2659 	struct pci_dev *pdev = adapter->pdev;
2660 
2661 	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2662 		dev_err(&pdev->dev,
2663 			"device reset schedule while reset bit is off\n");
2664 		return;
2665 	}
2666 	rtnl_lock();
2667 	ena_destroy_device(adapter);
2668 	ena_restore_device(adapter);
2669 	rtnl_unlock();
2670 }
2671 
2672 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2673 					struct ena_ring *rx_ring)
2674 {
2675 	if (likely(rx_ring->first_interrupt))
2676 		return 0;
2677 
2678 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2679 		return 0;
2680 
2681 	rx_ring->no_interrupt_event_cnt++;
2682 
2683 	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2684 		netif_err(adapter, rx_err, adapter->netdev,
2685 			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2686 			  rx_ring->qid);
2687 		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2688 		smp_mb__before_atomic();
2689 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2690 		return -EIO;
2691 	}
2692 
2693 	return 0;
2694 }
2695 
2696 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2697 					  struct ena_ring *tx_ring)
2698 {
2699 	struct ena_tx_buffer *tx_buf;
2700 	unsigned long last_jiffies;
2701 	u32 missed_tx = 0;
2702 	int i, rc = 0;
2703 
2704 	for (i = 0; i < tx_ring->ring_size; i++) {
2705 		tx_buf = &tx_ring->tx_buffer_info[i];
2706 		last_jiffies = tx_buf->last_jiffies;
2707 
2708 		if (last_jiffies == 0)
2709 			/* no pending Tx at this location */
2710 			continue;
2711 
2712 		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2713 			     2 * adapter->missing_tx_completion_to))) {
2714 			/* If after graceful period interrupt is still not
2715 			 * received, we schedule a reset
2716 			 */
2717 			netif_err(adapter, tx_err, adapter->netdev,
2718 				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2719 				  tx_ring->qid);
2720 			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2721 			smp_mb__before_atomic();
2722 			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2723 			return -EIO;
2724 		}
2725 
2726 		if (unlikely(time_is_before_jiffies(last_jiffies +
2727 				adapter->missing_tx_completion_to))) {
2728 			if (!tx_buf->print_once)
2729 				netif_notice(adapter, tx_err, adapter->netdev,
2730 					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2731 					     tx_ring->qid, i);
2732 
2733 			tx_buf->print_once = 1;
2734 			missed_tx++;
2735 		}
2736 	}
2737 
2738 	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2739 		netif_err(adapter, tx_err, adapter->netdev,
2740 			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2741 			  missed_tx,
2742 			  adapter->missing_tx_completion_threshold);
2743 		adapter->reset_reason =
2744 			ENA_REGS_RESET_MISS_TX_CMPL;
2745 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2746 		rc = -EIO;
2747 	}
2748 
2749 	u64_stats_update_begin(&tx_ring->syncp);
2750 	tx_ring->tx_stats.missed_tx = missed_tx;
2751 	u64_stats_update_end(&tx_ring->syncp);
2752 
2753 	return rc;
2754 }
2755 
2756 static void check_for_missing_completions(struct ena_adapter *adapter)
2757 {
2758 	struct ena_ring *tx_ring;
2759 	struct ena_ring *rx_ring;
2760 	int i, budget, rc;
2761 
2762 	/* Make sure the driver doesn't turn the device in other process */
2763 	smp_rmb();
2764 
2765 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2766 		return;
2767 
2768 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2769 		return;
2770 
2771 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2772 		return;
2773 
2774 	budget = ENA_MONITORED_TX_QUEUES;
2775 
2776 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2777 		tx_ring = &adapter->tx_ring[i];
2778 		rx_ring = &adapter->rx_ring[i];
2779 
2780 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2781 		if (unlikely(rc))
2782 			return;
2783 
2784 		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2785 		if (unlikely(rc))
2786 			return;
2787 
2788 		budget--;
2789 		if (!budget)
2790 			break;
2791 	}
2792 
2793 	adapter->last_monitored_tx_qid = i % adapter->num_queues;
2794 }
2795 
2796 /* trigger napi schedule after 2 consecutive detections */
2797 #define EMPTY_RX_REFILL 2
2798 /* For the rare case where the device runs out of Rx descriptors and the
2799  * napi handler failed to refill new Rx descriptors (due to a lack of memory
2800  * for example).
2801  * This case will lead to a deadlock:
2802  * The device won't send interrupts since all the new Rx packets will be dropped
2803  * The napi handler won't allocate new Rx descriptors so the device will be
2804  * able to send new packets.
2805  *
2806  * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2807  * It is recommended to have at least 512MB, with a minimum of 128MB for
2808  * constrained environment).
2809  *
2810  * When such a situation is detected - Reschedule napi
2811  */
2812 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2813 {
2814 	struct ena_ring *rx_ring;
2815 	int i, refill_required;
2816 
2817 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2818 		return;
2819 
2820 	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2821 		return;
2822 
2823 	for (i = 0; i < adapter->num_queues; i++) {
2824 		rx_ring = &adapter->rx_ring[i];
2825 
2826 		refill_required =
2827 			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
2828 		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2829 			rx_ring->empty_rx_queue++;
2830 
2831 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
2832 				u64_stats_update_begin(&rx_ring->syncp);
2833 				rx_ring->rx_stats.empty_rx_ring++;
2834 				u64_stats_update_end(&rx_ring->syncp);
2835 
2836 				netif_err(adapter, drv, adapter->netdev,
2837 					  "trigger refill for ring %d\n", i);
2838 
2839 				napi_schedule(rx_ring->napi);
2840 				rx_ring->empty_rx_queue = 0;
2841 			}
2842 		} else {
2843 			rx_ring->empty_rx_queue = 0;
2844 		}
2845 	}
2846 }
2847 
2848 /* Check for keep alive expiration */
2849 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2850 {
2851 	unsigned long keep_alive_expired;
2852 
2853 	if (!adapter->wd_state)
2854 		return;
2855 
2856 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2857 		return;
2858 
2859 	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2860 					   adapter->keep_alive_timeout);
2861 	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2862 		netif_err(adapter, drv, adapter->netdev,
2863 			  "Keep alive watchdog timeout.\n");
2864 		u64_stats_update_begin(&adapter->syncp);
2865 		adapter->dev_stats.wd_expired++;
2866 		u64_stats_update_end(&adapter->syncp);
2867 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
2868 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2869 	}
2870 }
2871 
2872 static void check_for_admin_com_state(struct ena_adapter *adapter)
2873 {
2874 	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
2875 		netif_err(adapter, drv, adapter->netdev,
2876 			  "ENA admin queue is not in running state!\n");
2877 		u64_stats_update_begin(&adapter->syncp);
2878 		adapter->dev_stats.admin_q_pause++;
2879 		u64_stats_update_end(&adapter->syncp);
2880 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
2881 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2882 	}
2883 }
2884 
2885 static void ena_update_hints(struct ena_adapter *adapter,
2886 			     struct ena_admin_ena_hw_hints *hints)
2887 {
2888 	struct net_device *netdev = adapter->netdev;
2889 
2890 	if (hints->admin_completion_tx_timeout)
2891 		adapter->ena_dev->admin_queue.completion_timeout =
2892 			hints->admin_completion_tx_timeout * 1000;
2893 
2894 	if (hints->mmio_read_timeout)
2895 		/* convert to usec */
2896 		adapter->ena_dev->mmio_read.reg_read_to =
2897 			hints->mmio_read_timeout * 1000;
2898 
2899 	if (hints->missed_tx_completion_count_threshold_to_reset)
2900 		adapter->missing_tx_completion_threshold =
2901 			hints->missed_tx_completion_count_threshold_to_reset;
2902 
2903 	if (hints->missing_tx_completion_timeout) {
2904 		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2905 			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
2906 		else
2907 			adapter->missing_tx_completion_to =
2908 				msecs_to_jiffies(hints->missing_tx_completion_timeout);
2909 	}
2910 
2911 	if (hints->netdev_wd_timeout)
2912 		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
2913 
2914 	if (hints->driver_watchdog_timeout) {
2915 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2916 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2917 		else
2918 			adapter->keep_alive_timeout =
2919 				msecs_to_jiffies(hints->driver_watchdog_timeout);
2920 	}
2921 }
2922 
2923 static void ena_update_host_info(struct ena_admin_host_info *host_info,
2924 				 struct net_device *netdev)
2925 {
2926 	host_info->supported_network_features[0] =
2927 		netdev->features & GENMASK_ULL(31, 0);
2928 	host_info->supported_network_features[1] =
2929 		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
2930 }
2931 
2932 static void ena_timer_service(struct timer_list *t)
2933 {
2934 	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
2935 	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
2936 	struct ena_admin_host_info *host_info =
2937 		adapter->ena_dev->host_attr.host_info;
2938 
2939 	check_for_missing_keep_alive(adapter);
2940 
2941 	check_for_admin_com_state(adapter);
2942 
2943 	check_for_missing_completions(adapter);
2944 
2945 	check_for_empty_rx_ring(adapter);
2946 
2947 	if (debug_area)
2948 		ena_dump_stats_to_buf(adapter, debug_area);
2949 
2950 	if (host_info)
2951 		ena_update_host_info(host_info, adapter->netdev);
2952 
2953 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2954 		netif_err(adapter, drv, adapter->netdev,
2955 			  "Trigger reset is on\n");
2956 		ena_dump_stats_to_dmesg(adapter);
2957 		queue_work(ena_wq, &adapter->reset_task);
2958 		return;
2959 	}
2960 
2961 	/* Reset the timer */
2962 	mod_timer(&adapter->timer_service, jiffies + HZ);
2963 }
2964 
2965 static int ena_calc_io_queue_num(struct pci_dev *pdev,
2966 				 struct ena_com_dev *ena_dev,
2967 				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2968 {
2969 	int io_sq_num, io_queue_num;
2970 
2971 	/* In case of LLQ use the llq number in the get feature cmd */
2972 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2973 		io_sq_num = get_feat_ctx->max_queues.max_llq_num;
2974 
2975 		if (io_sq_num == 0) {
2976 			dev_err(&pdev->dev,
2977 				"Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
2978 
2979 			ena_dev->tx_mem_queue_type =
2980 				ENA_ADMIN_PLACEMENT_POLICY_HOST;
2981 			io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2982 		}
2983 	} else {
2984 		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2985 	}
2986 
2987 	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
2988 	io_queue_num = min_t(int, io_queue_num, io_sq_num);
2989 	io_queue_num = min_t(int, io_queue_num,
2990 			     get_feat_ctx->max_queues.max_cq_num);
2991 	/* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */
2992 	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
2993 	if (unlikely(!io_queue_num)) {
2994 		dev_err(&pdev->dev, "The device doesn't have io queues\n");
2995 		return -EFAULT;
2996 	}
2997 
2998 	return io_queue_num;
2999 }
3000 
3001 static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3002 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
3003 {
3004 	bool has_mem_bar;
3005 
3006 	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3007 
3008 	/* Enable push mode if device supports LLQ */
3009 	if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
3010 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3011 	else
3012 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3013 }
3014 
3015 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3016 				 struct net_device *netdev)
3017 {
3018 	netdev_features_t dev_features = 0;
3019 
3020 	/* Set offload features */
3021 	if (feat->offload.tx &
3022 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3023 		dev_features |= NETIF_F_IP_CSUM;
3024 
3025 	if (feat->offload.tx &
3026 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3027 		dev_features |= NETIF_F_IPV6_CSUM;
3028 
3029 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3030 		dev_features |= NETIF_F_TSO;
3031 
3032 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3033 		dev_features |= NETIF_F_TSO6;
3034 
3035 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3036 		dev_features |= NETIF_F_TSO_ECN;
3037 
3038 	if (feat->offload.rx_supported &
3039 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3040 		dev_features |= NETIF_F_RXCSUM;
3041 
3042 	if (feat->offload.rx_supported &
3043 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3044 		dev_features |= NETIF_F_RXCSUM;
3045 
3046 	netdev->features =
3047 		dev_features |
3048 		NETIF_F_SG |
3049 		NETIF_F_RXHASH |
3050 		NETIF_F_HIGHDMA;
3051 
3052 	netdev->hw_features |= netdev->features;
3053 	netdev->vlan_features |= netdev->features;
3054 }
3055 
3056 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3057 				     struct ena_com_dev_get_features_ctx *feat)
3058 {
3059 	struct net_device *netdev = adapter->netdev;
3060 
3061 	/* Copy mac address */
3062 	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3063 		eth_hw_addr_random(netdev);
3064 		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3065 	} else {
3066 		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3067 		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3068 	}
3069 
3070 	/* Set offload features */
3071 	ena_set_dev_offloads(feat, netdev);
3072 
3073 	adapter->max_mtu = feat->dev_attr.max_mtu;
3074 	netdev->max_mtu = adapter->max_mtu;
3075 	netdev->min_mtu = ENA_MIN_MTU;
3076 }
3077 
3078 static int ena_rss_init_default(struct ena_adapter *adapter)
3079 {
3080 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3081 	struct device *dev = &adapter->pdev->dev;
3082 	int rc, i;
3083 	u32 val;
3084 
3085 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3086 	if (unlikely(rc)) {
3087 		dev_err(dev, "Cannot init indirect table\n");
3088 		goto err_rss_init;
3089 	}
3090 
3091 	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3092 		val = ethtool_rxfh_indir_default(i, adapter->num_queues);
3093 		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3094 						       ENA_IO_RXQ_IDX(val));
3095 		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3096 			dev_err(dev, "Cannot fill indirect table\n");
3097 			goto err_fill_indir;
3098 		}
3099 	}
3100 
3101 	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3102 					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3103 	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3104 		dev_err(dev, "Cannot fill hash function\n");
3105 		goto err_fill_indir;
3106 	}
3107 
3108 	rc = ena_com_set_default_hash_ctrl(ena_dev);
3109 	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3110 		dev_err(dev, "Cannot fill hash control\n");
3111 		goto err_fill_indir;
3112 	}
3113 
3114 	return 0;
3115 
3116 err_fill_indir:
3117 	ena_com_rss_destroy(ena_dev);
3118 err_rss_init:
3119 
3120 	return rc;
3121 }
3122 
3123 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3124 {
3125 	int release_bars;
3126 
3127 	if (ena_dev->mem_bar)
3128 		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
3129 
3130 	if (ena_dev->reg_bar)
3131 		devm_iounmap(&pdev->dev, ena_dev->reg_bar);
3132 
3133 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3134 	pci_release_selected_regions(pdev, release_bars);
3135 }
3136 
3137 static int ena_calc_queue_size(struct pci_dev *pdev,
3138 			       struct ena_com_dev *ena_dev,
3139 			       u16 *max_tx_sgl_size,
3140 			       u16 *max_rx_sgl_size,
3141 			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
3142 {
3143 	u32 queue_size = ENA_DEFAULT_RING_SIZE;
3144 
3145 	queue_size = min_t(u32, queue_size,
3146 			   get_feat_ctx->max_queues.max_cq_depth);
3147 	queue_size = min_t(u32, queue_size,
3148 			   get_feat_ctx->max_queues.max_sq_depth);
3149 
3150 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3151 		queue_size = min_t(u32, queue_size,
3152 				   get_feat_ctx->max_queues.max_llq_depth);
3153 
3154 	queue_size = rounddown_pow_of_two(queue_size);
3155 
3156 	if (unlikely(!queue_size)) {
3157 		dev_err(&pdev->dev, "Invalid queue size\n");
3158 		return -EFAULT;
3159 	}
3160 
3161 	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3162 				 get_feat_ctx->max_queues.max_packet_tx_descs);
3163 	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3164 				 get_feat_ctx->max_queues.max_packet_rx_descs);
3165 
3166 	return queue_size;
3167 }
3168 
3169 /* ena_probe - Device Initialization Routine
3170  * @pdev: PCI device information struct
3171  * @ent: entry in ena_pci_tbl
3172  *
3173  * Returns 0 on success, negative on failure
3174  *
3175  * ena_probe initializes an adapter identified by a pci_dev structure.
3176  * The OS initialization, configuring of the adapter private structure,
3177  * and a hardware reset occur.
3178  */
3179 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3180 {
3181 	struct ena_com_dev_get_features_ctx get_feat_ctx;
3182 	static int version_printed;
3183 	struct net_device *netdev;
3184 	struct ena_adapter *adapter;
3185 	struct ena_com_dev *ena_dev = NULL;
3186 	static int adapters_found;
3187 	int io_queue_num, bars, rc;
3188 	int queue_size;
3189 	u16 tx_sgl_size = 0;
3190 	u16 rx_sgl_size = 0;
3191 	bool wd_state;
3192 
3193 	dev_dbg(&pdev->dev, "%s\n", __func__);
3194 
3195 	if (version_printed++ == 0)
3196 		dev_info(&pdev->dev, "%s", version);
3197 
3198 	rc = pci_enable_device_mem(pdev);
3199 	if (rc) {
3200 		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3201 		return rc;
3202 	}
3203 
3204 	pci_set_master(pdev);
3205 
3206 	ena_dev = vzalloc(sizeof(*ena_dev));
3207 	if (!ena_dev) {
3208 		rc = -ENOMEM;
3209 		goto err_disable_device;
3210 	}
3211 
3212 	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3213 	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3214 	if (rc) {
3215 		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3216 			rc);
3217 		goto err_free_ena_dev;
3218 	}
3219 
3220 	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3221 					pci_resource_start(pdev, ENA_REG_BAR),
3222 					pci_resource_len(pdev, ENA_REG_BAR));
3223 	if (!ena_dev->reg_bar) {
3224 		dev_err(&pdev->dev, "failed to remap regs bar\n");
3225 		rc = -EFAULT;
3226 		goto err_free_region;
3227 	}
3228 
3229 	ena_dev->dmadev = &pdev->dev;
3230 
3231 	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
3232 	if (rc) {
3233 		dev_err(&pdev->dev, "ena device init failed\n");
3234 		if (rc == -ETIME)
3235 			rc = -EPROBE_DEFER;
3236 		goto err_free_region;
3237 	}
3238 
3239 	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
3240 
3241 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3242 		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3243 						   pci_resource_start(pdev, ENA_MEM_BAR),
3244 						   pci_resource_len(pdev, ENA_MEM_BAR));
3245 		if (!ena_dev->mem_bar) {
3246 			rc = -EFAULT;
3247 			goto err_device_destroy;
3248 		}
3249 	}
3250 
3251 	/* initial Tx interrupt delay, Assumes 1 usec granularity.
3252 	* Updated during device initialization with the real granularity
3253 	*/
3254 	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3255 	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3256 	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
3257 					 &rx_sgl_size, &get_feat_ctx);
3258 	if ((queue_size <= 0) || (io_queue_num <= 0)) {
3259 		rc = -EFAULT;
3260 		goto err_device_destroy;
3261 	}
3262 
3263 	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
3264 		 io_queue_num, queue_size);
3265 
3266 	/* dev zeroed in init_etherdev */
3267 	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
3268 	if (!netdev) {
3269 		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3270 		rc = -ENOMEM;
3271 		goto err_device_destroy;
3272 	}
3273 
3274 	SET_NETDEV_DEV(netdev, &pdev->dev);
3275 
3276 	adapter = netdev_priv(netdev);
3277 	pci_set_drvdata(pdev, adapter);
3278 
3279 	adapter->ena_dev = ena_dev;
3280 	adapter->netdev = netdev;
3281 	adapter->pdev = pdev;
3282 
3283 	ena_set_conf_feat_params(adapter, &get_feat_ctx);
3284 
3285 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3286 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3287 
3288 	adapter->tx_ring_size = queue_size;
3289 	adapter->rx_ring_size = queue_size;
3290 
3291 	adapter->max_tx_sgl_size = tx_sgl_size;
3292 	adapter->max_rx_sgl_size = rx_sgl_size;
3293 
3294 	adapter->num_queues = io_queue_num;
3295 	adapter->last_monitored_tx_qid = 0;
3296 
3297 	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3298 	adapter->wd_state = wd_state;
3299 
3300 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3301 
3302 	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3303 	if (rc) {
3304 		dev_err(&pdev->dev,
3305 			"Failed to query interrupt moderation feature\n");
3306 		goto err_netdev_destroy;
3307 	}
3308 	ena_init_io_rings(adapter);
3309 
3310 	netdev->netdev_ops = &ena_netdev_ops;
3311 	netdev->watchdog_timeo = TX_TIMEOUT;
3312 	ena_set_ethtool_ops(netdev);
3313 
3314 	netdev->priv_flags |= IFF_UNICAST_FLT;
3315 
3316 	u64_stats_init(&adapter->syncp);
3317 
3318 	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3319 	if (rc) {
3320 		dev_err(&pdev->dev,
3321 			"Failed to enable and set the admin interrupts\n");
3322 		goto err_worker_destroy;
3323 	}
3324 	rc = ena_rss_init_default(adapter);
3325 	if (rc && (rc != -EOPNOTSUPP)) {
3326 		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
3327 		goto err_free_msix;
3328 	}
3329 
3330 	ena_config_debug_area(adapter);
3331 
3332 	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
3333 
3334 	netif_carrier_off(netdev);
3335 
3336 	rc = register_netdev(netdev);
3337 	if (rc) {
3338 		dev_err(&pdev->dev, "Cannot register net device\n");
3339 		goto err_rss;
3340 	}
3341 
3342 	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
3343 
3344 	adapter->last_keep_alive_jiffies = jiffies;
3345 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
3346 	adapter->missing_tx_completion_to = TX_TIMEOUT;
3347 	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
3348 
3349 	ena_update_hints(adapter, &get_feat_ctx.hw_hints);
3350 
3351 	timer_setup(&adapter->timer_service, ena_timer_service, 0);
3352 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3353 
3354 	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
3355 		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
3356 		 netdev->dev_addr, io_queue_num);
3357 
3358 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3359 
3360 	adapters_found++;
3361 
3362 	return 0;
3363 
3364 err_rss:
3365 	ena_com_delete_debug_area(ena_dev);
3366 	ena_com_rss_destroy(ena_dev);
3367 err_free_msix:
3368 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3369 	ena_free_mgmnt_irq(adapter);
3370 	ena_disable_msix(adapter);
3371 err_worker_destroy:
3372 	ena_com_destroy_interrupt_moderation(ena_dev);
3373 	del_timer(&adapter->timer_service);
3374 err_netdev_destroy:
3375 	free_netdev(netdev);
3376 err_device_destroy:
3377 	ena_com_delete_host_info(ena_dev);
3378 	ena_com_admin_destroy(ena_dev);
3379 err_free_region:
3380 	ena_release_bars(ena_dev, pdev);
3381 err_free_ena_dev:
3382 	vfree(ena_dev);
3383 err_disable_device:
3384 	pci_disable_device(pdev);
3385 	return rc;
3386 }
3387 
3388 /*****************************************************************************/
3389 
3390 /* ena_remove - Device Removal Routine
3391  * @pdev: PCI device information struct
3392  *
3393  * ena_remove is called by the PCI subsystem to alert the driver
3394  * that it should release a PCI device.
3395  */
3396 static void ena_remove(struct pci_dev *pdev)
3397 {
3398 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
3399 	struct ena_com_dev *ena_dev;
3400 	struct net_device *netdev;
3401 
3402 	ena_dev = adapter->ena_dev;
3403 	netdev = adapter->netdev;
3404 
3405 #ifdef CONFIG_RFS_ACCEL
3406 	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
3407 		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
3408 		netdev->rx_cpu_rmap = NULL;
3409 	}
3410 #endif /* CONFIG_RFS_ACCEL */
3411 
3412 	unregister_netdev(netdev);
3413 	del_timer_sync(&adapter->timer_service);
3414 
3415 	cancel_work_sync(&adapter->reset_task);
3416 
3417 	/* Reset the device only if the device is running. */
3418 	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3419 		ena_com_dev_reset(ena_dev, adapter->reset_reason);
3420 
3421 	ena_free_mgmnt_irq(adapter);
3422 
3423 	ena_disable_msix(adapter);
3424 
3425 	free_netdev(netdev);
3426 
3427 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3428 
3429 	ena_com_abort_admin_commands(ena_dev);
3430 
3431 	ena_com_wait_for_abort_completion(ena_dev);
3432 
3433 	ena_com_admin_destroy(ena_dev);
3434 
3435 	ena_com_rss_destroy(ena_dev);
3436 
3437 	ena_com_delete_debug_area(ena_dev);
3438 
3439 	ena_com_delete_host_info(ena_dev);
3440 
3441 	ena_release_bars(ena_dev, pdev);
3442 
3443 	pci_disable_device(pdev);
3444 
3445 	ena_com_destroy_interrupt_moderation(ena_dev);
3446 
3447 	vfree(ena_dev);
3448 }
3449 
3450 #ifdef CONFIG_PM
3451 /* ena_suspend - PM suspend callback
3452  * @pdev: PCI device information struct
3453  * @state:power state
3454  */
3455 static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
3456 {
3457 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
3458 
3459 	u64_stats_update_begin(&adapter->syncp);
3460 	adapter->dev_stats.suspend++;
3461 	u64_stats_update_end(&adapter->syncp);
3462 
3463 	rtnl_lock();
3464 	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3465 		dev_err(&pdev->dev,
3466 			"ignoring device reset request as the device is being suspended\n");
3467 		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3468 	}
3469 	ena_destroy_device(adapter);
3470 	rtnl_unlock();
3471 	return 0;
3472 }
3473 
3474 /* ena_resume - PM resume callback
3475  * @pdev: PCI device information struct
3476  *
3477  */
3478 static int ena_resume(struct pci_dev *pdev)
3479 {
3480 	struct ena_adapter *adapter = pci_get_drvdata(pdev);
3481 	int rc;
3482 
3483 	u64_stats_update_begin(&adapter->syncp);
3484 	adapter->dev_stats.resume++;
3485 	u64_stats_update_end(&adapter->syncp);
3486 
3487 	rtnl_lock();
3488 	rc = ena_restore_device(adapter);
3489 	rtnl_unlock();
3490 	return rc;
3491 }
3492 #endif
3493 
3494 static struct pci_driver ena_pci_driver = {
3495 	.name		= DRV_MODULE_NAME,
3496 	.id_table	= ena_pci_tbl,
3497 	.probe		= ena_probe,
3498 	.remove		= ena_remove,
3499 #ifdef CONFIG_PM
3500 	.suspend    = ena_suspend,
3501 	.resume     = ena_resume,
3502 #endif
3503 	.sriov_configure = pci_sriov_configure_simple,
3504 };
3505 
3506 static int __init ena_init(void)
3507 {
3508 	pr_info("%s", version);
3509 
3510 	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
3511 	if (!ena_wq) {
3512 		pr_err("Failed to create workqueue\n");
3513 		return -ENOMEM;
3514 	}
3515 
3516 	return pci_register_driver(&ena_pci_driver);
3517 }
3518 
3519 static void __exit ena_cleanup(void)
3520 {
3521 	pci_unregister_driver(&ena_pci_driver);
3522 
3523 	if (ena_wq) {
3524 		destroy_workqueue(ena_wq);
3525 		ena_wq = NULL;
3526 	}
3527 }
3528 
3529 /******************************************************************************
3530  ******************************** AENQ Handlers *******************************
3531  *****************************************************************************/
3532 /* ena_update_on_link_change:
3533  * Notify the network interface about the change in link status
3534  */
3535 static void ena_update_on_link_change(void *adapter_data,
3536 				      struct ena_admin_aenq_entry *aenq_e)
3537 {
3538 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3539 	struct ena_admin_aenq_link_change_desc *aenq_desc =
3540 		(struct ena_admin_aenq_link_change_desc *)aenq_e;
3541 	int status = aenq_desc->flags &
3542 		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3543 
3544 	if (status) {
3545 		netdev_dbg(adapter->netdev, "%s\n", __func__);
3546 		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3547 		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
3548 			netif_carrier_on(adapter->netdev);
3549 	} else {
3550 		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3551 		netif_carrier_off(adapter->netdev);
3552 	}
3553 }
3554 
3555 static void ena_keep_alive_wd(void *adapter_data,
3556 			      struct ena_admin_aenq_entry *aenq_e)
3557 {
3558 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3559 	struct ena_admin_aenq_keep_alive_desc *desc;
3560 	u64 rx_drops;
3561 
3562 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3563 	adapter->last_keep_alive_jiffies = jiffies;
3564 
3565 	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
3566 
3567 	u64_stats_update_begin(&adapter->syncp);
3568 	adapter->dev_stats.rx_drops = rx_drops;
3569 	u64_stats_update_end(&adapter->syncp);
3570 }
3571 
3572 static void ena_notification(void *adapter_data,
3573 			     struct ena_admin_aenq_entry *aenq_e)
3574 {
3575 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3576 	struct ena_admin_ena_hw_hints *hints;
3577 
3578 	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3579 	     "Invalid group(%x) expected %x\n",
3580 	     aenq_e->aenq_common_desc.group,
3581 	     ENA_ADMIN_NOTIFICATION);
3582 
3583 	switch (aenq_e->aenq_common_desc.syndrom) {
3584 	case ENA_ADMIN_UPDATE_HINTS:
3585 		hints = (struct ena_admin_ena_hw_hints *)
3586 			(&aenq_e->inline_data_w4);
3587 		ena_update_hints(adapter, hints);
3588 		break;
3589 	default:
3590 		netif_err(adapter, drv, adapter->netdev,
3591 			  "Invalid aenq notification link state %d\n",
3592 			  aenq_e->aenq_common_desc.syndrom);
3593 	}
3594 }
3595 
3596 /* This handler will called for unknown event group or unimplemented handlers*/
3597 static void unimplemented_aenq_handler(void *data,
3598 				       struct ena_admin_aenq_entry *aenq_e)
3599 {
3600 	struct ena_adapter *adapter = (struct ena_adapter *)data;
3601 
3602 	netif_err(adapter, drv, adapter->netdev,
3603 		  "Unknown event was received or event with unimplemented handler\n");
3604 }
3605 
3606 static struct ena_aenq_handlers aenq_handlers = {
3607 	.handlers = {
3608 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3609 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3610 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3611 	},
3612 	.unimplemented_handler = unimplemented_aenq_handler
3613 };
3614 
3615 module_init(ena_init);
3616 module_exit(ena_cleanup);
3617