/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"

static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not in the middle of a reset or that a reset
	 * has already been triggered.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
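/* ena_init_io_rings - initialize the software state of all Tx/Rx rings
 * @adapter: network interface device structure
 */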
static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids)
			goto err_free_tx_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
err_free_tx_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}

static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_rx_ids = vzalloc_node(size, node);
	if (!rx_ring->free_rx_ids) {
		rx_ring->free_rx_ids = vzalloc(size);
		if (!rx_ring->free_rx_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_rx_ids);
	rx_ring->free_rx_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = ENA_PAGE_SIZE;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
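/* ena_refill_rx_bufs - allocate pages for Rx descriptors and post them
 * @rx_ring: RX ring to refill
 * @num: number of descriptors to refill
 *
 * Returns the number of buffers that were actually added to the ring.
 */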
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_rx_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
				    struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_skb(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}
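/* ena_destroy_all_tx_queues - destroy all device-side Tx queues
 * @adapter: board private structure
 */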
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}

static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_skb(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}

static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_rx_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
				       struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}

static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
					struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_ring->smoothed_interval,
				tx_ring->smoothed_interval,
				true);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}

static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;

	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* Tx and Rx share the same interrupt vector */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_intr_moderation(rx_ring, tx_ring);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->tx_ring->first_interrupt = true;
	ena_napi->rx_ring->first_interrupt = true;

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}

/* Reserve a single MSI-X vector for management (admin + aenq),
 * plus one vector for each potential I/O queue.
 * The number of potential I/O queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}

static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
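/* ena_request_io_irq - request an IRQ for each I/O queue vector and apply
 * its CPU affinity hint
 * @adapter: board private structure
 */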
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}

static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}

static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}

static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}

static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}

/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}

static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	return 0;
}

static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}

static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}

static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}

static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

	return rc;
}

static int ena_up(struct ena_adapter *adapter)
{
	int rc, i;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	/* napi poll functions should be initialized before running
	 * request_irq(), to handle a rare condition where there is a pending
	 * interrupt, causing the ISR to fire immediately while the poll
	 * function wasn't set yet, causing a null dereference
	 */
	ena_init_napi(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	/* allocate transmit descriptors */
	rc = ena_setup_all_tx_resources(adapter);
	if (rc)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = ena_setup_all_rx_resources(adapter);
	if (rc)
		goto err_setup_rx;

	/* Create TX queues */
	rc = ena_create_all_io_tx_queues(adapter);
	if (rc)
		goto err_create_tx_queues;

	/* Create RX queues */
	rc = ena_create_all_io_rx_queues(adapter);
	if (rc)
		goto err_create_rx_queues;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disabled napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
	ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
	ena_free_all_io_rx_resources(adapter);
err_setup_rx:
	ena_free_all_io_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:
	ena_del_napi(adapter);

	return rc;
}

static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);

	/* After destroy the queue there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
		/* stop submitting admin commands on a device that was reset */
		ena_com_set_admin_running_state(adapter->ena_dev, false);
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}

/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}

/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return 0;

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Check for device status and issue reset if needed */
	check_for_admin_com_state(adapter);
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, ifdown, adapter->netdev,
			  "Destroy failure, restarting device\n");
		ena_dump_stats_to_dmesg(adapter);
		/* rtnl lock already obtained in dev_ioctl() layer */
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);
	}

	return 0;
}

static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;

	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}

static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}

static int ena_tx_map_skb(struct ena_ring *tx_ring,
			  struct ena_tx_buffer *tx_info,
			  struct sk_buff *skb,
			  void **push_hdr,
			  u16 *header_len)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma;
	u32 skb_head_len, frag_len, last_frag;
	u16 push_len = 0;
	u16 delta = 0;
	int i = 0;

	skb_head_len = skb_headlen(skb);
	tx_info->skb = skb;
	ena_buf = tx_info->bufs;
ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2043 /* When the device is LLQ mode, the driver will copy 2044 * the header into the device memory space. 2045 * the ena_com layer assume the header is in a linear 2046 * memory space. 2047 * This assumption might be wrong since part of the header 2048 * can be in the fragmented buffers. 2049 * Use skb_header_pointer to make sure the header is in a 2050 * linear memory space. 2051 */ 2052 2053 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); 2054 *push_hdr = skb_header_pointer(skb, 0, push_len, 2055 tx_ring->push_buf_intermediate_buf); 2056 *header_len = push_len; 2057 if (unlikely(skb->data != *push_hdr)) { 2058 u64_stats_update_begin(&tx_ring->syncp); 2059 tx_ring->tx_stats.llq_buffer_copy++; 2060 u64_stats_update_end(&tx_ring->syncp); 2061 2062 delta = push_len - skb_head_len; 2063 } 2064 } else { 2065 *push_hdr = NULL; 2066 *header_len = min_t(u32, skb_head_len, 2067 tx_ring->tx_max_header_size); 2068 } 2069 2070 netif_dbg(adapter, tx_queued, adapter->netdev, 2071 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, 2072 *push_hdr, push_len); 2073 2074 if (skb_head_len > push_len) { 2075 dma = dma_map_single(tx_ring->dev, skb->data + push_len, 2076 skb_head_len - push_len, DMA_TO_DEVICE); 2077 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) 2078 goto error_report_dma_error; 2079 2080 ena_buf->paddr = dma; 2081 ena_buf->len = skb_head_len - push_len; 2082 2083 ena_buf++; 2084 tx_info->num_of_bufs++; 2085 tx_info->map_linear_data = 1; 2086 } else { 2087 tx_info->map_linear_data = 0; 2088 } 2089 2090 last_frag = skb_shinfo(skb)->nr_frags; 2091 2092 for (i = 0; i < last_frag; i++) { 2093 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2094 2095 frag_len = skb_frag_size(frag); 2096 2097 if (unlikely(delta >= frag_len)) { 2098 delta -= frag_len; 2099 continue; 2100 } 2101 2102 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, 2103 frag_len - delta, DMA_TO_DEVICE); 2104 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) 2105 goto error_report_dma_error; 2106 2107 ena_buf->paddr = dma; 2108 ena_buf->len = frag_len - delta; 2109 ena_buf++; 2110 tx_info->num_of_bufs++; 2111 delta = 0; 2112 } 2113 2114 return 0; 2115 2116 error_report_dma_error: 2117 u64_stats_update_begin(&tx_ring->syncp); 2118 tx_ring->tx_stats.dma_mapping_err++; 2119 u64_stats_update_end(&tx_ring->syncp); 2120 netdev_warn(adapter->netdev, "failed to map skb\n"); 2121 2122 tx_info->skb = NULL; 2123 2124 tx_info->num_of_bufs += i; 2125 ena_unmap_tx_skb(tx_ring, tx_info); 2126 2127 return -EINVAL; 2128 } 2129 2130 /* Called with netif_tx_lock. 
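 * The core takes the per-queue xmit lock around this call, so ena_start_xmit() needs no additional locking of its own.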
*/ 2131 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) 2132 { 2133 struct ena_adapter *adapter = netdev_priv(dev); 2134 struct ena_tx_buffer *tx_info; 2135 struct ena_com_tx_ctx ena_tx_ctx; 2136 struct ena_ring *tx_ring; 2137 struct netdev_queue *txq; 2138 void *push_hdr; 2139 u16 next_to_use, req_id, header_len; 2140 int qid, rc, nb_hw_desc; 2141 2142 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); 2143 /* Determine which tx ring we will be placed on */ 2144 qid = skb_get_queue_mapping(skb); 2145 tx_ring = &adapter->tx_ring[qid]; 2146 txq = netdev_get_tx_queue(dev, qid); 2147 2148 rc = ena_check_and_linearize_skb(tx_ring, skb); 2149 if (unlikely(rc)) 2150 goto error_drop_packet; 2151 2152 skb_tx_timestamp(skb); 2153 2154 next_to_use = tx_ring->next_to_use; 2155 req_id = tx_ring->free_tx_ids[next_to_use]; 2156 tx_info = &tx_ring->tx_buffer_info[req_id]; 2157 tx_info->num_of_bufs = 0; 2158 2159 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); 2160 2161 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); 2162 if (unlikely(rc)) 2163 goto error_drop_packet; 2164 2165 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 2166 ena_tx_ctx.ena_bufs = tx_info->bufs; 2167 ena_tx_ctx.push_header = push_hdr; 2168 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2169 ena_tx_ctx.req_id = req_id; 2170 ena_tx_ctx.header_len = header_len; 2171 2172 /* set flags and meta data */ 2173 ena_tx_csum(&ena_tx_ctx, skb); 2174 2175 /* prepare the packet's descriptors to dma engine */ 2176 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2177 &nb_hw_desc); 2178 2179 /* ena_com_prepare_tx() can't fail due to overflow of tx queue, 2180 * since the number of free descriptors in the queue is checked 2181 * after sending the previous packet. In case there isn't enough 2182 * space in the queue for the next packet, it is stopped 2183 * until there is again enough available space in the queue. 2184 * All other failure reasons of ena_com_prepare_tx() are fatal 2185 * and therefore require a device reset. 2186 */ 2187 if (unlikely(rc)) { 2188 netif_err(adapter, tx_queued, dev, 2189 "failed to prepare tx bufs\n"); 2190 u64_stats_update_begin(&tx_ring->syncp); 2191 tx_ring->tx_stats.prepare_ctx_err++; 2192 u64_stats_update_end(&tx_ring->syncp); 2193 adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; 2194 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2195 goto error_unmap_dma; 2196 } 2197 2198 netdev_tx_sent_queue(txq, skb->len); 2199 2200 u64_stats_update_begin(&tx_ring->syncp); 2201 tx_ring->tx_stats.cnt++; 2202 tx_ring->tx_stats.bytes += skb->len; 2203 u64_stats_update_end(&tx_ring->syncp); 2204 2205 tx_info->tx_descs = nb_hw_desc; 2206 tx_info->last_jiffies = jiffies; 2207 tx_info->print_once = 0; 2208 2209 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2210 tx_ring->ring_size); 2211 2212 /* stop the queue when no more space available, the packet can have up 2213 * to sgl_size + 2. one for the meta descriptor and one for header 2214 * (if the header is larger than tx_max_header_size). 
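 * I.e. in the worst case a packet needs sgl_size data descriptors plus one meta descriptor plus one extra header descriptor, hence the sgl_size + 2 threshold below.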
2215 */ 2216 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 2217 tx_ring->sgl_size + 2))) { 2218 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", 2219 __func__, qid); 2220 2221 netif_tx_stop_queue(txq); 2222 u64_stats_update_begin(&tx_ring->syncp); 2223 tx_ring->tx_stats.queue_stop++; 2224 u64_stats_update_end(&tx_ring->syncp); 2225 2226 /* There is a rare condition where this function decides to 2227 * stop the queue but meanwhile clean_tx_irq updates 2228 * next_to_completion and terminates. 2229 * The queue will remain stopped forever. 2230 * To solve this issue add a mb() to make sure that 2231 * netif_tx_stop_queue() write is visible before checking if 2232 * there is additional space in the queue. 2233 */ 2234 smp_mb(); 2235 2236 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 2237 ENA_TX_WAKEUP_THRESH)) { 2238 netif_tx_wake_queue(txq); 2239 u64_stats_update_begin(&tx_ring->syncp); 2240 tx_ring->tx_stats.queue_wakeup++; 2241 u64_stats_update_end(&tx_ring->syncp); 2242 } 2243 } 2244 2245 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) { 2246 /* trigger the dma engine. ena_com_write_sq_doorbell() 2247 * has a memory barrier 2248 */ 2249 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2250 u64_stats_update_begin(&tx_ring->syncp); 2251 tx_ring->tx_stats.doorbells++; 2252 u64_stats_update_end(&tx_ring->syncp); 2253 } 2254 2255 return NETDEV_TX_OK; 2256 2257 error_unmap_dma: 2258 ena_unmap_tx_skb(tx_ring, tx_info); 2259 tx_info->skb = NULL; 2260 2261 error_drop_packet: 2262 dev_kfree_skb(skb); 2263 return NETDEV_TX_OK; 2264 } 2265 2266 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, 2267 struct net_device *sb_dev) 2268 { 2269 u16 qid; 2270 /* We suspect that preserving the Rx queue mapping is mainly useful for 2271 * in-kernel network services that loop an incoming skb from Rx back to Tx; 2272 * for normal user-generated traffic we most probably will not get here. 2273 */ 2274 if (skb_rx_queue_recorded(skb)) 2275 qid = skb_get_rx_queue(skb); 2276 else 2277 qid = netdev_pick_tx(dev, skb, NULL); 2278 2279 return qid; 2280 } 2281 2282 static void ena_config_host_info(struct ena_com_dev *ena_dev, 2283 struct pci_dev *pdev) 2284 { 2285 struct ena_admin_host_info *host_info; 2286 int rc; 2287 2288 /* Allocate only the host info */ 2289 rc = ena_com_allocate_host_info(ena_dev); 2290 if (rc) { 2291 pr_err("Cannot allocate host info\n"); 2292 return; 2293 } 2294 2295 host_info = ena_dev->host_attr.host_info; 2296 2297 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; 2298 host_info->os_type = ENA_ADMIN_OS_LINUX; 2299 host_info->kernel_ver = LINUX_VERSION_CODE; 2300 strlcpy(host_info->kernel_ver_str, utsname()->version, 2301 sizeof(host_info->kernel_ver_str) - 1); 2302 host_info->os_dist = 0; 2303 strncpy(host_info->os_dist_str, utsname()->release, 2304 sizeof(host_info->os_dist_str) - 1); 2305 host_info->driver_version = 2306 (DRV_MODULE_VER_MAJOR) | 2307 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 2308 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | 2309 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); 2310 host_info->num_cpus = num_online_cpus(); 2311 2312 rc = ena_com_set_host_attributes(ena_dev); 2313 if (rc) { 2314 if (rc == -EOPNOTSUPP) 2315 pr_warn("Cannot set host attributes\n"); 2316 else 2317 pr_err("Cannot set host attributes\n"); 2318 2319 goto err; 2320 } 2321 2322 return; 2323 2324 err: 2325 ena_com_delete_host_info(ena_dev); 2326 } 2327 2328 static void ena_config_debug_area(struct ena_adapter *adapter) 2329 { 2330 u32
debug_area_size; 2331 int rc, ss_count; 2332 2333 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); 2334 if (ss_count <= 0) { 2335 netif_err(adapter, drv, adapter->netdev, 2336 "SS count is negative\n"); 2337 return; 2338 } 2339 2340 /* allocate 32 bytes for each string and 64bit for the value */ 2341 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 2342 2343 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); 2344 if (rc) { 2345 pr_err("Cannot allocate debug area\n"); 2346 return; 2347 } 2348 2349 rc = ena_com_set_host_attributes(adapter->ena_dev); 2350 if (rc) { 2351 if (rc == -EOPNOTSUPP) 2352 netif_warn(adapter, drv, adapter->netdev, 2353 "Cannot set host attributes\n"); 2354 else 2355 netif_err(adapter, drv, adapter->netdev, 2356 "Cannot set host attributes\n"); 2357 goto err; 2358 } 2359 2360 return; 2361 err: 2362 ena_com_delete_debug_area(adapter->ena_dev); 2363 } 2364 2365 static void ena_get_stats64(struct net_device *netdev, 2366 struct rtnl_link_stats64 *stats) 2367 { 2368 struct ena_adapter *adapter = netdev_priv(netdev); 2369 struct ena_ring *rx_ring, *tx_ring; 2370 unsigned int start; 2371 u64 rx_drops; 2372 int i; 2373 2374 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2375 return; 2376 2377 for (i = 0; i < adapter->num_queues; i++) { 2378 u64 bytes, packets; 2379 2380 tx_ring = &adapter->tx_ring[i]; 2381 2382 do { 2383 start = u64_stats_fetch_begin_irq(&tx_ring->syncp); 2384 packets = tx_ring->tx_stats.cnt; 2385 bytes = tx_ring->tx_stats.bytes; 2386 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 2387 2388 stats->tx_packets += packets; 2389 stats->tx_bytes += bytes; 2390 2391 rx_ring = &adapter->rx_ring[i]; 2392 2393 do { 2394 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 2395 packets = rx_ring->rx_stats.cnt; 2396 bytes = rx_ring->rx_stats.bytes; 2397 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 2398 2399 stats->rx_packets += packets; 2400 stats->rx_bytes += bytes; 2401 } 2402 2403 do { 2404 start = u64_stats_fetch_begin_irq(&adapter->syncp); 2405 rx_drops = adapter->dev_stats.rx_drops; 2406 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); 2407 2408 stats->rx_dropped = rx_drops; 2409 2410 stats->multicast = 0; 2411 stats->collisions = 0; 2412 2413 stats->rx_length_errors = 0; 2414 stats->rx_crc_errors = 0; 2415 stats->rx_frame_errors = 0; 2416 stats->rx_fifo_errors = 0; 2417 stats->rx_missed_errors = 0; 2418 stats->tx_window_errors = 0; 2419 2420 stats->rx_errors = 0; 2421 stats->tx_errors = 0; 2422 } 2423 2424 static const struct net_device_ops ena_netdev_ops = { 2425 .ndo_open = ena_open, 2426 .ndo_stop = ena_close, 2427 .ndo_start_xmit = ena_start_xmit, 2428 .ndo_select_queue = ena_select_queue, 2429 .ndo_get_stats64 = ena_get_stats64, 2430 .ndo_tx_timeout = ena_tx_timeout, 2431 .ndo_change_mtu = ena_change_mtu, 2432 .ndo_set_mac_address = NULL, 2433 .ndo_validate_addr = eth_validate_addr, 2434 }; 2435 2436 static int ena_device_validate_params(struct ena_adapter *adapter, 2437 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2438 { 2439 struct net_device *netdev = adapter->netdev; 2440 int rc; 2441 2442 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, 2443 adapter->mac_addr); 2444 if (!rc) { 2445 netif_err(adapter, drv, netdev, 2446 "Error, mac address are different\n"); 2447 return -EINVAL; 2448 } 2449 2450 if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) || 2451 (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) { 2452 
netif_err(adapter, drv, netdev, 2453 "Error, device doesn't support enough queues\n"); 2454 return -EINVAL; 2455 } 2456 2457 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { 2458 netif_err(adapter, drv, netdev, 2459 "Error, device max mtu is smaller than netdev MTU\n"); 2460 return -EINVAL; 2461 } 2462 2463 return 0; 2464 } 2465 2466 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, 2467 struct ena_com_dev_get_features_ctx *get_feat_ctx, 2468 bool *wd_state) 2469 { 2470 struct device *dev = &pdev->dev; 2471 bool readless_supported; 2472 u32 aenq_groups; 2473 int dma_width; 2474 int rc; 2475 2476 rc = ena_com_mmio_reg_read_request_init(ena_dev); 2477 if (rc) { 2478 dev_err(dev, "failed to init mmio read less\n"); 2479 return rc; 2480 } 2481 2482 /* The PCIe configuration space revision id indicate if mmio reg 2483 * read is disabled 2484 */ 2485 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); 2486 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 2487 2488 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 2489 if (rc) { 2490 dev_err(dev, "Can not reset device\n"); 2491 goto err_mmio_read_less; 2492 } 2493 2494 rc = ena_com_validate_version(ena_dev); 2495 if (rc) { 2496 dev_err(dev, "device version is too low\n"); 2497 goto err_mmio_read_less; 2498 } 2499 2500 dma_width = ena_com_get_dma_width(ena_dev); 2501 if (dma_width < 0) { 2502 dev_err(dev, "Invalid dma width value %d", dma_width); 2503 rc = dma_width; 2504 goto err_mmio_read_less; 2505 } 2506 2507 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); 2508 if (rc) { 2509 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc); 2510 goto err_mmio_read_less; 2511 } 2512 2513 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); 2514 if (rc) { 2515 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n", 2516 rc); 2517 goto err_mmio_read_less; 2518 } 2519 2520 /* ENA admin level init */ 2521 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 2522 if (rc) { 2523 dev_err(dev, 2524 "Can not initialize ena admin queue with device\n"); 2525 goto err_mmio_read_less; 2526 } 2527 2528 /* To enable the msix interrupts the driver needs to know the number 2529 * of queues. 
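 * (One MSI-X vector is used per IO queue plus one for management, so the queue count must be known before the vectors are requested.)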
So the driver uses polling mode to retrieve this 2530 * information 2531 */ 2532 ena_com_set_admin_polling_mode(ena_dev, true); 2533 2534 ena_config_host_info(ena_dev, pdev); 2535 2536 /* Get Device Attributes*/ 2537 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 2538 if (rc) { 2539 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); 2540 goto err_admin_init; 2541 } 2542 2543 /* Try to turn all the available aenq groups */ 2544 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 2545 BIT(ENA_ADMIN_FATAL_ERROR) | 2546 BIT(ENA_ADMIN_WARNING) | 2547 BIT(ENA_ADMIN_NOTIFICATION) | 2548 BIT(ENA_ADMIN_KEEP_ALIVE); 2549 2550 aenq_groups &= get_feat_ctx->aenq.supported_groups; 2551 2552 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 2553 if (rc) { 2554 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); 2555 goto err_admin_init; 2556 } 2557 2558 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 2559 2560 return 0; 2561 2562 err_admin_init: 2563 ena_com_delete_host_info(ena_dev); 2564 ena_com_admin_destroy(ena_dev); 2565 err_mmio_read_less: 2566 ena_com_mmio_reg_read_request_destroy(ena_dev); 2567 2568 return rc; 2569 } 2570 2571 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, 2572 int io_vectors) 2573 { 2574 struct ena_com_dev *ena_dev = adapter->ena_dev; 2575 struct device *dev = &adapter->pdev->dev; 2576 int rc; 2577 2578 rc = ena_enable_msix(adapter, io_vectors); 2579 if (rc) { 2580 dev_err(dev, "Can not reserve msix vectors\n"); 2581 return rc; 2582 } 2583 2584 ena_setup_mgmnt_intr(adapter); 2585 2586 rc = ena_request_mgmnt_irq(adapter); 2587 if (rc) { 2588 dev_err(dev, "Can not setup management interrupts\n"); 2589 goto err_disable_msix; 2590 } 2591 2592 ena_com_set_admin_polling_mode(ena_dev, false); 2593 2594 ena_com_admin_aenq_enable(ena_dev); 2595 2596 return 0; 2597 2598 err_disable_msix: 2599 ena_disable_msix(adapter); 2600 2601 return rc; 2602 } 2603 2604 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) 2605 { 2606 struct net_device *netdev = adapter->netdev; 2607 struct ena_com_dev *ena_dev = adapter->ena_dev; 2608 bool dev_up; 2609 2610 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) 2611 return; 2612 2613 netif_carrier_off(netdev); 2614 2615 del_timer_sync(&adapter->timer_service); 2616 2617 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2618 adapter->dev_up_before_reset = dev_up; 2619 if (!graceful) 2620 ena_com_set_admin_running_state(ena_dev, false); 2621 2622 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2623 ena_down(adapter); 2624 2625 /* Stop the device from sending AENQ events (in case reset flag is set 2626 * and device is up, ena_down() already reset the device. 
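 * The extra ena_com_dev_reset() below is therefore only issued when that is not the case.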
2627 */ 2628 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2629 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 2630 2631 ena_free_mgmnt_irq(adapter); 2632 2633 ena_disable_msix(adapter); 2634 2635 ena_com_abort_admin_commands(ena_dev); 2636 2637 ena_com_wait_for_abort_completion(ena_dev); 2638 2639 ena_com_admin_destroy(ena_dev); 2640 2641 ena_com_mmio_reg_read_request_destroy(ena_dev); 2642 2643 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 2644 2645 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2646 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2647 } 2648 2649 static int ena_restore_device(struct ena_adapter *adapter) 2650 { 2651 struct ena_com_dev_get_features_ctx get_feat_ctx; 2652 struct ena_com_dev *ena_dev = adapter->ena_dev; 2653 struct pci_dev *pdev = adapter->pdev; 2654 bool wd_state; 2655 int rc; 2656 2657 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2658 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); 2659 if (rc) { 2660 dev_err(&pdev->dev, "Can not initialize device\n"); 2661 goto err; 2662 } 2663 adapter->wd_state = wd_state; 2664 2665 rc = ena_device_validate_params(adapter, &get_feat_ctx); 2666 if (rc) { 2667 dev_err(&pdev->dev, "Validation of device parameters failed\n"); 2668 goto err_device_destroy; 2669 } 2670 2671 rc = ena_enable_msix_and_set_admin_interrupts(adapter, 2672 adapter->num_queues); 2673 if (rc) { 2674 dev_err(&pdev->dev, "Enable MSI-X failed\n"); 2675 goto err_device_destroy; 2676 } 2677 /* If the interface was up before the reset bring it up */ 2678 if (adapter->dev_up_before_reset) { 2679 rc = ena_up(adapter); 2680 if (rc) { 2681 dev_err(&pdev->dev, "Failed to create I/O queues\n"); 2682 goto err_disable_msix; 2683 } 2684 } 2685 2686 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2687 2688 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2689 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) 2690 netif_carrier_on(adapter->netdev); 2691 2692 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2693 dev_err(&pdev->dev, 2694 "Device reset completed successfully, Driver info: %s\n", 2695 version); 2696 2697 return rc; 2698 err_disable_msix: 2699 ena_free_mgmnt_irq(adapter); 2700 ena_disable_msix(adapter); 2701 err_device_destroy: 2702 ena_com_abort_admin_commands(ena_dev); 2703 ena_com_wait_for_abort_completion(ena_dev); 2704 ena_com_admin_destroy(ena_dev); 2705 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 2706 ena_com_mmio_reg_read_request_destroy(ena_dev); 2707 err: 2708 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2709 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2710 dev_err(&pdev->dev, 2711 "Reset attempt failed. 
Can not reset the device\n"); 2712 2713 return rc; 2714 } 2715 2716 static void ena_fw_reset_device(struct work_struct *work) 2717 { 2718 struct ena_adapter *adapter = 2719 container_of(work, struct ena_adapter, reset_task); 2720 struct pci_dev *pdev = adapter->pdev; 2721 2722 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 2723 dev_err(&pdev->dev, 2724 "device reset schedule while reset bit is off\n"); 2725 return; 2726 } 2727 rtnl_lock(); 2728 ena_destroy_device(adapter, false); 2729 ena_restore_device(adapter); 2730 rtnl_unlock(); 2731 } 2732 2733 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, 2734 struct ena_ring *rx_ring) 2735 { 2736 if (likely(rx_ring->first_interrupt)) 2737 return 0; 2738 2739 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 2740 return 0; 2741 2742 rx_ring->no_interrupt_event_cnt++; 2743 2744 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { 2745 netif_err(adapter, rx_err, adapter->netdev, 2746 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", 2747 rx_ring->qid); 2748 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 2749 smp_mb__before_atomic(); 2750 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2751 return -EIO; 2752 } 2753 2754 return 0; 2755 } 2756 2757 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 2758 struct ena_ring *tx_ring) 2759 { 2760 struct ena_tx_buffer *tx_buf; 2761 unsigned long last_jiffies; 2762 u32 missed_tx = 0; 2763 int i, rc = 0; 2764 2765 for (i = 0; i < tx_ring->ring_size; i++) { 2766 tx_buf = &tx_ring->tx_buffer_info[i]; 2767 last_jiffies = tx_buf->last_jiffies; 2768 2769 if (last_jiffies == 0) 2770 /* no pending Tx at this location */ 2771 continue; 2772 2773 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies + 2774 2 * adapter->missing_tx_completion_to))) { 2775 /* If after graceful period interrupt is still not 2776 * received, we schedule a reset 2777 */ 2778 netif_err(adapter, tx_err, adapter->netdev, 2779 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", 2780 tx_ring->qid); 2781 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 2782 smp_mb__before_atomic(); 2783 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2784 return -EIO; 2785 } 2786 2787 if (unlikely(time_is_before_jiffies(last_jiffies + 2788 adapter->missing_tx_completion_to))) { 2789 if (!tx_buf->print_once) 2790 netif_notice(adapter, tx_err, adapter->netdev, 2791 "Found a Tx that wasn't completed on time, qid %d, index %d.\n", 2792 tx_ring->qid, i); 2793 2794 tx_buf->print_once = 1; 2795 missed_tx++; 2796 } 2797 } 2798 2799 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { 2800 netif_err(adapter, tx_err, adapter->netdev, 2801 "The number of lost tx completions is above the threshold (%d > %d). 
Reset the device\n", 2802 missed_tx, 2803 adapter->missing_tx_completion_threshold); 2804 adapter->reset_reason = 2805 ENA_REGS_RESET_MISS_TX_CMPL; 2806 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2807 rc = -EIO; 2808 } 2809 2810 u64_stats_update_begin(&tx_ring->syncp); 2811 tx_ring->tx_stats.missed_tx = missed_tx; 2812 u64_stats_update_end(&tx_ring->syncp); 2813 2814 return rc; 2815 } 2816 2817 static void check_for_missing_completions(struct ena_adapter *adapter) 2818 { 2819 struct ena_ring *tx_ring; 2820 struct ena_ring *rx_ring; 2821 int i, budget, rc; 2822 2823 /* Make sure the driver doesn't turn the device in other process */ 2824 smp_rmb(); 2825 2826 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2827 return; 2828 2829 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 2830 return; 2831 2832 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 2833 return; 2834 2835 budget = ENA_MONITORED_TX_QUEUES; 2836 2837 for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { 2838 tx_ring = &adapter->tx_ring[i]; 2839 rx_ring = &adapter->rx_ring[i]; 2840 2841 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 2842 if (unlikely(rc)) 2843 return; 2844 2845 rc = check_for_rx_interrupt_queue(adapter, rx_ring); 2846 if (unlikely(rc)) 2847 return; 2848 2849 budget--; 2850 if (!budget) 2851 break; 2852 } 2853 2854 adapter->last_monitored_tx_qid = i % adapter->num_queues; 2855 } 2856 2857 /* trigger napi schedule after 2 consecutive detections */ 2858 #define EMPTY_RX_REFILL 2 2859 /* For the rare case where the device runs out of Rx descriptors and the 2860 * napi handler failed to refill new Rx descriptors (due to a lack of memory 2861 * for example). 2862 * This case will lead to a deadlock: 2863 * The device won't send interrupts since all the new Rx packets will be dropped 2864 * The napi handler won't allocate new Rx descriptors so the device will be 2865 * able to send new packets. 2866 * 2867 * This scenario can happen when the kernel's vm.min_free_kbytes is too small. 2868 * It is recommended to have at least 512MB, with a minimum of 128MB for 2869 * constrained environment). 
2870 * 2871 * When such a situation is detected - Reschedule napi 2872 */ 2873 static void check_for_empty_rx_ring(struct ena_adapter *adapter) 2874 { 2875 struct ena_ring *rx_ring; 2876 int i, refill_required; 2877 2878 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2879 return; 2880 2881 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 2882 return; 2883 2884 for (i = 0; i < adapter->num_queues; i++) { 2885 rx_ring = &adapter->rx_ring[i]; 2886 2887 refill_required = 2888 ena_com_free_desc(rx_ring->ena_com_io_sq); 2889 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 2890 rx_ring->empty_rx_queue++; 2891 2892 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 2893 u64_stats_update_begin(&rx_ring->syncp); 2894 rx_ring->rx_stats.empty_rx_ring++; 2895 u64_stats_update_end(&rx_ring->syncp); 2896 2897 netif_err(adapter, drv, adapter->netdev, 2898 "trigger refill for ring %d\n", i); 2899 2900 napi_schedule(rx_ring->napi); 2901 rx_ring->empty_rx_queue = 0; 2902 } 2903 } else { 2904 rx_ring->empty_rx_queue = 0; 2905 } 2906 } 2907 } 2908 2909 /* Check for keep alive expiration */ 2910 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 2911 { 2912 unsigned long keep_alive_expired; 2913 2914 if (!adapter->wd_state) 2915 return; 2916 2917 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2918 return; 2919 2920 keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies + 2921 adapter->keep_alive_timeout); 2922 if (unlikely(time_is_before_jiffies(keep_alive_expired))) { 2923 netif_err(adapter, drv, adapter->netdev, 2924 "Keep alive watchdog timeout.\n"); 2925 u64_stats_update_begin(&adapter->syncp); 2926 adapter->dev_stats.wd_expired++; 2927 u64_stats_update_end(&adapter->syncp); 2928 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 2929 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2930 } 2931 } 2932 2933 static void check_for_admin_com_state(struct ena_adapter *adapter) 2934 { 2935 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { 2936 netif_err(adapter, drv, adapter->netdev, 2937 "ENA admin queue is not in running state!\n"); 2938 u64_stats_update_begin(&adapter->syncp); 2939 adapter->dev_stats.admin_q_pause++; 2940 u64_stats_update_end(&adapter->syncp); 2941 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 2942 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2943 } 2944 } 2945 2946 static void ena_update_hints(struct ena_adapter *adapter, 2947 struct ena_admin_ena_hw_hints *hints) 2948 { 2949 struct net_device *netdev = adapter->netdev; 2950 2951 if (hints->admin_completion_tx_timeout) 2952 adapter->ena_dev->admin_queue.completion_timeout = 2953 hints->admin_completion_tx_timeout * 1000; 2954 2955 if (hints->mmio_read_timeout) 2956 /* convert to usec */ 2957 adapter->ena_dev->mmio_read.reg_read_to = 2958 hints->mmio_read_timeout * 1000; 2959 2960 if (hints->missed_tx_completion_count_threshold_to_reset) 2961 adapter->missing_tx_completion_threshold = 2962 hints->missed_tx_completion_count_threshold_to_reset; 2963 2964 if (hints->missing_tx_completion_timeout) { 2965 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2966 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; 2967 else 2968 adapter->missing_tx_completion_to = 2969 msecs_to_jiffies(hints->missing_tx_completion_timeout); 2970 } 2971 2972 if (hints->netdev_wd_timeout) 2973 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); 2974 2975 if (hints->driver_watchdog_timeout) { 2976 if (hints->driver_watchdog_timeout == 
ENA_HW_HINTS_NO_TIMEOUT) 2977 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2978 else 2979 adapter->keep_alive_timeout = 2980 msecs_to_jiffies(hints->driver_watchdog_timeout); 2981 } 2982 } 2983 2984 static void ena_update_host_info(struct ena_admin_host_info *host_info, 2985 struct net_device *netdev) 2986 { 2987 host_info->supported_network_features[0] = 2988 netdev->features & GENMASK_ULL(31, 0); 2989 host_info->supported_network_features[1] = 2990 (netdev->features & GENMASK_ULL(63, 32)) >> 32; 2991 } 2992 2993 static void ena_timer_service(struct timer_list *t) 2994 { 2995 struct ena_adapter *adapter = from_timer(adapter, t, timer_service); 2996 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; 2997 struct ena_admin_host_info *host_info = 2998 adapter->ena_dev->host_attr.host_info; 2999 3000 check_for_missing_keep_alive(adapter); 3001 3002 check_for_admin_com_state(adapter); 3003 3004 check_for_missing_completions(adapter); 3005 3006 check_for_empty_rx_ring(adapter); 3007 3008 if (debug_area) 3009 ena_dump_stats_to_buf(adapter, debug_area); 3010 3011 if (host_info) 3012 ena_update_host_info(host_info, adapter->netdev); 3013 3014 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 3015 netif_err(adapter, drv, adapter->netdev, 3016 "Trigger reset is on\n"); 3017 ena_dump_stats_to_dmesg(adapter); 3018 queue_work(ena_wq, &adapter->reset_task); 3019 return; 3020 } 3021 3022 /* Reset the timer */ 3023 mod_timer(&adapter->timer_service, jiffies + HZ); 3024 } 3025 3026 static int ena_calc_io_queue_num(struct pci_dev *pdev, 3027 struct ena_com_dev *ena_dev, 3028 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3029 { 3030 int io_sq_num, io_queue_num; 3031 3032 /* In case of LLQ use the llq number in the get feature cmd */ 3033 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 3034 io_sq_num = get_feat_ctx->llq.max_llq_num; 3035 else 3036 io_sq_num = get_feat_ctx->max_queues.max_sq_num; 3037 3038 io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); 3039 io_queue_num = min_t(int, io_queue_num, io_sq_num); 3040 io_queue_num = min_t(int, io_queue_num, 3041 get_feat_ctx->max_queues.max_cq_num); 3042 /* 1 IRQ for mgmnt and 1 IRQ per IO queue (Tx/Rx share a vector) */ 3043 io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1); 3044 if (unlikely(!io_queue_num)) { 3045 dev_err(&pdev->dev, "The device doesn't have io queues\n"); 3046 return -EFAULT; 3047 } 3048 3049 return io_queue_num; 3050 } 3051 3052 static int ena_set_queues_placement_policy(struct pci_dev *pdev, 3053 struct ena_com_dev *ena_dev, 3054 struct ena_admin_feature_llq_desc *llq, 3055 struct ena_llq_configurations *llq_default_configurations) 3056 { 3057 bool has_mem_bar; 3058 int rc; 3059 u32 llq_feature_mask; 3060 3061 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 3062 if (!(ena_dev->supported_features & llq_feature_mask)) { 3063 dev_err(&pdev->dev, 3064 "LLQ is not supported. Fallback to host mode policy.\n"); 3065 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3066 return 0; 3067 } 3068 3069 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); 3070 3071 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 3072 if (unlikely(rc)) { 3073 dev_err(&pdev->dev, 3074 "Failed to configure the device mode.
Fallback to host mode policy.\n"); 3075 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3076 return 0; 3077 } 3078 3079 /* Nothing to config, exit */ 3080 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 3081 return 0; 3082 3083 if (!has_mem_bar) { 3084 dev_err(&pdev->dev, 3085 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n"); 3086 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3087 return 0; 3088 } 3089 3090 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, 3091 pci_resource_start(pdev, ENA_MEM_BAR), 3092 pci_resource_len(pdev, ENA_MEM_BAR)); 3093 3094 if (!ena_dev->mem_bar) 3095 return -EFAULT; 3096 3097 return 0; 3098 } 3099 3100 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, 3101 struct net_device *netdev) 3102 { 3103 netdev_features_t dev_features = 0; 3104 3105 /* Set offload features */ 3106 if (feat->offload.tx & 3107 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 3108 dev_features |= NETIF_F_IP_CSUM; 3109 3110 if (feat->offload.tx & 3111 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) 3112 dev_features |= NETIF_F_IPV6_CSUM; 3113 3114 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 3115 dev_features |= NETIF_F_TSO; 3116 3117 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) 3118 dev_features |= NETIF_F_TSO6; 3119 3120 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) 3121 dev_features |= NETIF_F_TSO_ECN; 3122 3123 if (feat->offload.rx_supported & 3124 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 3125 dev_features |= NETIF_F_RXCSUM; 3126 3127 if (feat->offload.rx_supported & 3128 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) 3129 dev_features |= NETIF_F_RXCSUM; 3130 3131 netdev->features = 3132 dev_features | 3133 NETIF_F_SG | 3134 NETIF_F_RXHASH | 3135 NETIF_F_HIGHDMA; 3136 3137 netdev->hw_features |= netdev->features; 3138 netdev->vlan_features |= netdev->features; 3139 } 3140 3141 static void ena_set_conf_feat_params(struct ena_adapter *adapter, 3142 struct ena_com_dev_get_features_ctx *feat) 3143 { 3144 struct net_device *netdev = adapter->netdev; 3145 3146 /* Copy mac address */ 3147 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { 3148 eth_hw_addr_random(netdev); 3149 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 3150 } else { 3151 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); 3152 ether_addr_copy(netdev->dev_addr, adapter->mac_addr); 3153 } 3154 3155 /* Set offload features */ 3156 ena_set_dev_offloads(feat, netdev); 3157 3158 adapter->max_mtu = feat->dev_attr.max_mtu; 3159 netdev->max_mtu = adapter->max_mtu; 3160 netdev->min_mtu = ENA_MIN_MTU; 3161 } 3162 3163 static int ena_rss_init_default(struct ena_adapter *adapter) 3164 { 3165 struct ena_com_dev *ena_dev = adapter->ena_dev; 3166 struct device *dev = &adapter->pdev->dev; 3167 int rc, i; 3168 u32 val; 3169 3170 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 3171 if (unlikely(rc)) { 3172 dev_err(dev, "Cannot init indirect table\n"); 3173 goto err_rss_init; 3174 } 3175 3176 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 3177 val = ethtool_rxfh_indir_default(i, adapter->num_queues); 3178 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 3179 ENA_IO_RXQ_IDX(val)); 3180 if (unlikely(rc && (rc != -EOPNOTSUPP))) { 3181 dev_err(dev, "Cannot fill indirect table\n"); 3182 goto err_fill_indir; 3183 } 3184 } 3185 3186 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 3187 ENA_HASH_KEY_SIZE, 
0xFFFFFFFF); 3188 if (unlikely(rc && (rc != -EOPNOTSUPP))) { 3189 dev_err(dev, "Cannot fill hash function\n"); 3190 goto err_fill_indir; 3191 } 3192 3193 rc = ena_com_set_default_hash_ctrl(ena_dev); 3194 if (unlikely(rc && (rc != -EOPNOTSUPP))) { 3195 dev_err(dev, "Cannot fill hash control\n"); 3196 goto err_fill_indir; 3197 } 3198 3199 return 0; 3200 3201 err_fill_indir: 3202 ena_com_rss_destroy(ena_dev); 3203 err_rss_init: 3204 3205 return rc; 3206 } 3207 3208 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) 3209 { 3210 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3211 3212 pci_release_selected_regions(pdev, release_bars); 3213 } 3214 3215 static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config) 3216 { 3217 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 3218 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 3219 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 3220 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 3221 llq_config->llq_ring_entry_size_value = 128; 3222 } 3223 3224 static int ena_calc_queue_size(struct pci_dev *pdev, 3225 struct ena_com_dev *ena_dev, 3226 u16 *max_tx_sgl_size, 3227 u16 *max_rx_sgl_size, 3228 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3229 { 3230 u32 queue_size = ENA_DEFAULT_RING_SIZE; 3231 3232 queue_size = min_t(u32, queue_size, 3233 get_feat_ctx->max_queues.max_cq_depth); 3234 queue_size = min_t(u32, queue_size, 3235 get_feat_ctx->max_queues.max_sq_depth); 3236 3237 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 3238 queue_size = min_t(u32, queue_size, 3239 get_feat_ctx->llq.max_llq_depth); 3240 3241 queue_size = rounddown_pow_of_two(queue_size); 3242 3243 if (unlikely(!queue_size)) { 3244 dev_err(&pdev->dev, "Invalid queue size\n"); 3245 return -EFAULT; 3246 } 3247 3248 *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, 3249 get_feat_ctx->max_queues.max_packet_tx_descs); 3250 *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, 3251 get_feat_ctx->max_queues.max_packet_rx_descs); 3252 3253 return queue_size; 3254 } 3255 3256 /* ena_probe - Device Initialization Routine 3257 * @pdev: PCI device information struct 3258 * @ent: entry in ena_pci_tbl 3259 * 3260 * Returns 0 on success, negative on failure 3261 * 3262 * ena_probe initializes an adapter identified by a pci_dev structure. 3263 * The OS initialization, configuring of the adapter private structure, 3264 * and a hardware reset occur. 
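 * The hardware reset is issued from ena_device_init() via ena_com_dev_reset().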
3265 */ 3266 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3267 { 3268 struct ena_com_dev_get_features_ctx get_feat_ctx; 3269 static int version_printed; 3270 struct net_device *netdev; 3271 struct ena_adapter *adapter; 3272 struct ena_llq_configurations llq_config; 3273 struct ena_com_dev *ena_dev = NULL; 3274 char *queue_type_str; 3275 static int adapters_found; 3276 int io_queue_num, bars, rc; 3277 int queue_size; 3278 u16 tx_sgl_size = 0; 3279 u16 rx_sgl_size = 0; 3280 bool wd_state; 3281 3282 dev_dbg(&pdev->dev, "%s\n", __func__); 3283 3284 if (version_printed++ == 0) 3285 dev_info(&pdev->dev, "%s", version); 3286 3287 rc = pci_enable_device_mem(pdev); 3288 if (rc) { 3289 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); 3290 return rc; 3291 } 3292 3293 pci_set_master(pdev); 3294 3295 ena_dev = vzalloc(sizeof(*ena_dev)); 3296 if (!ena_dev) { 3297 rc = -ENOMEM; 3298 goto err_disable_device; 3299 } 3300 3301 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3302 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); 3303 if (rc) { 3304 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", 3305 rc); 3306 goto err_free_ena_dev; 3307 } 3308 3309 ena_dev->reg_bar = devm_ioremap(&pdev->dev, 3310 pci_resource_start(pdev, ENA_REG_BAR), 3311 pci_resource_len(pdev, ENA_REG_BAR)); 3312 if (!ena_dev->reg_bar) { 3313 dev_err(&pdev->dev, "failed to remap regs bar\n"); 3314 rc = -EFAULT; 3315 goto err_free_region; 3316 } 3317 3318 ena_dev->dmadev = &pdev->dev; 3319 3320 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); 3321 if (rc) { 3322 dev_err(&pdev->dev, "ena device init failed\n"); 3323 if (rc == -ETIME) 3324 rc = -EPROBE_DEFER; 3325 goto err_free_region; 3326 } 3327 3328 set_default_llq_configurations(&llq_config); 3329 3330 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, 3331 &llq_config); 3332 if (rc) { 3333 dev_err(&pdev->dev, "ena device init failed\n"); 3334 goto err_device_destroy; 3335 } 3336 3337 /* initial Tx interrupt delay, Assumes 1 usec granularity. 3338 * Updated during device initialization with the real granularity 3339 */ 3340 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; 3341 io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx); 3342 queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size, 3343 &rx_sgl_size, &get_feat_ctx); 3344 if ((queue_size <= 0) || (io_queue_num <= 0)) { 3345 rc = -EFAULT; 3346 goto err_device_destroy; 3347 } 3348 3349 dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n", 3350 io_queue_num, queue_size, 3351 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ? 
3352 "ENABLED" : "DISABLED"); 3353 3354 /* dev zeroed in init_etherdev */ 3355 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); 3356 if (!netdev) { 3357 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); 3358 rc = -ENOMEM; 3359 goto err_device_destroy; 3360 } 3361 3362 SET_NETDEV_DEV(netdev, &pdev->dev); 3363 3364 adapter = netdev_priv(netdev); 3365 pci_set_drvdata(pdev, adapter); 3366 3367 adapter->ena_dev = ena_dev; 3368 adapter->netdev = netdev; 3369 adapter->pdev = pdev; 3370 3371 ena_set_conf_feat_params(adapter, &get_feat_ctx); 3372 3373 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3374 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 3375 3376 adapter->tx_ring_size = queue_size; 3377 adapter->rx_ring_size = queue_size; 3378 3379 adapter->max_tx_sgl_size = tx_sgl_size; 3380 adapter->max_rx_sgl_size = rx_sgl_size; 3381 3382 adapter->num_queues = io_queue_num; 3383 adapter->last_monitored_tx_qid = 0; 3384 3385 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; 3386 adapter->wd_state = wd_state; 3387 3388 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); 3389 3390 rc = ena_com_init_interrupt_moderation(adapter->ena_dev); 3391 if (rc) { 3392 dev_err(&pdev->dev, 3393 "Failed to query interrupt moderation feature\n"); 3394 goto err_netdev_destroy; 3395 } 3396 ena_init_io_rings(adapter); 3397 3398 netdev->netdev_ops = &ena_netdev_ops; 3399 netdev->watchdog_timeo = TX_TIMEOUT; 3400 ena_set_ethtool_ops(netdev); 3401 3402 netdev->priv_flags |= IFF_UNICAST_FLT; 3403 3404 u64_stats_init(&adapter->syncp); 3405 3406 rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); 3407 if (rc) { 3408 dev_err(&pdev->dev, 3409 "Failed to enable and set the admin interrupts\n"); 3410 goto err_worker_destroy; 3411 } 3412 rc = ena_rss_init_default(adapter); 3413 if (rc && (rc != -EOPNOTSUPP)) { 3414 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); 3415 goto err_free_msix; 3416 } 3417 3418 ena_config_debug_area(adapter); 3419 3420 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); 3421 3422 netif_carrier_off(netdev); 3423 3424 rc = register_netdev(netdev); 3425 if (rc) { 3426 dev_err(&pdev->dev, "Cannot register net device\n"); 3427 goto err_rss; 3428 } 3429 3430 INIT_WORK(&adapter->reset_task, ena_fw_reset_device); 3431 3432 adapter->last_keep_alive_jiffies = jiffies; 3433 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 3434 adapter->missing_tx_completion_to = TX_TIMEOUT; 3435 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS; 3436 3437 ena_update_hints(adapter, &get_feat_ctx.hw_hints); 3438 3439 timer_setup(&adapter->timer_service, ena_timer_service, 0); 3440 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 3441 3442 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 3443 queue_type_str = "Regular"; 3444 else 3445 queue_type_str = "Low Latency"; 3446 3447 dev_info(&pdev->dev, 3448 "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n", 3449 DEVICE_NAME, (long)pci_resource_start(pdev, 0), 3450 netdev->dev_addr, io_queue_num, queue_type_str); 3451 3452 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 3453 3454 adapters_found++; 3455 3456 return 0; 3457 3458 err_rss: 3459 ena_com_delete_debug_area(ena_dev); 3460 ena_com_rss_destroy(ena_dev); 3461 err_free_msix: 3462 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); 3463 /* stop submitting admin commands on a device that was reset */ 3464 ena_com_set_admin_running_state(ena_dev, false); 
3465 ena_free_mgmnt_irq(adapter); 3466 ena_disable_msix(adapter); 3467 err_worker_destroy: 3468 ena_com_destroy_interrupt_moderation(ena_dev); 3469 del_timer(&adapter->timer_service); 3470 err_netdev_destroy: 3471 free_netdev(netdev); 3472 err_device_destroy: 3473 ena_com_delete_host_info(ena_dev); 3474 ena_com_admin_destroy(ena_dev); 3475 err_free_region: 3476 ena_release_bars(ena_dev, pdev); 3477 err_free_ena_dev: 3478 vfree(ena_dev); 3479 err_disable_device: 3480 pci_disable_device(pdev); 3481 return rc; 3482 } 3483 3484 /*****************************************************************************/ 3485 3486 /* ena_remove - Device Removal Routine 3487 * @pdev: PCI device information struct 3488 * 3489 * ena_remove is called by the PCI subsystem to alert the driver 3490 * that it should release a PCI device. 3491 */ 3492 static void ena_remove(struct pci_dev *pdev) 3493 { 3494 struct ena_adapter *adapter = pci_get_drvdata(pdev); 3495 struct ena_com_dev *ena_dev; 3496 struct net_device *netdev; 3497 3498 ena_dev = adapter->ena_dev; 3499 netdev = adapter->netdev; 3500 3501 #ifdef CONFIG_RFS_ACCEL 3502 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { 3503 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 3504 netdev->rx_cpu_rmap = NULL; 3505 } 3506 #endif /* CONFIG_RFS_ACCEL */ 3507 del_timer_sync(&adapter->timer_service); 3508 3509 cancel_work_sync(&adapter->reset_task); 3510 3511 rtnl_lock(); 3512 ena_destroy_device(adapter, true); 3513 rtnl_unlock(); 3514 3515 unregister_netdev(netdev); 3516 3517 free_netdev(netdev); 3518 3519 ena_com_rss_destroy(ena_dev); 3520 3521 ena_com_delete_debug_area(ena_dev); 3522 3523 ena_com_delete_host_info(ena_dev); 3524 3525 ena_release_bars(ena_dev, pdev); 3526 3527 pci_disable_device(pdev); 3528 3529 ena_com_destroy_interrupt_moderation(ena_dev); 3530 3531 vfree(ena_dev); 3532 } 3533 3534 #ifdef CONFIG_PM 3535 /* ena_suspend - PM suspend callback 3536 * @pdev: PCI device information struct 3537 * @state:power state 3538 */ 3539 static int ena_suspend(struct pci_dev *pdev, pm_message_t state) 3540 { 3541 struct ena_adapter *adapter = pci_get_drvdata(pdev); 3542 3543 u64_stats_update_begin(&adapter->syncp); 3544 adapter->dev_stats.suspend++; 3545 u64_stats_update_end(&adapter->syncp); 3546 3547 rtnl_lock(); 3548 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 3549 dev_err(&pdev->dev, 3550 "ignoring device reset request as the device is being suspended\n"); 3551 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3552 } 3553 ena_destroy_device(adapter, true); 3554 rtnl_unlock(); 3555 return 0; 3556 } 3557 3558 /* ena_resume - PM resume callback 3559 * @pdev: PCI device information struct 3560 * 3561 */ 3562 static int ena_resume(struct pci_dev *pdev) 3563 { 3564 struct ena_adapter *adapter = pci_get_drvdata(pdev); 3565 int rc; 3566 3567 u64_stats_update_begin(&adapter->syncp); 3568 adapter->dev_stats.resume++; 3569 u64_stats_update_end(&adapter->syncp); 3570 3571 rtnl_lock(); 3572 rc = ena_restore_device(adapter); 3573 rtnl_unlock(); 3574 return rc; 3575 } 3576 #endif 3577 3578 static struct pci_driver ena_pci_driver = { 3579 .name = DRV_MODULE_NAME, 3580 .id_table = ena_pci_tbl, 3581 .probe = ena_probe, 3582 .remove = ena_remove, 3583 #ifdef CONFIG_PM 3584 .suspend = ena_suspend, 3585 .resume = ena_resume, 3586 #endif 3587 .sriov_configure = pci_sriov_configure_simple, 3588 }; 3589 3590 static int __init ena_init(void) 3591 { 3592 pr_info("%s", version); 3593 3594 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); 3595 if (!ena_wq) 
{ 3596 pr_err("Failed to create workqueue\n"); 3597 return -ENOMEM; 3598 } 3599 3600 return pci_register_driver(&ena_pci_driver); 3601 } 3602 3603 static void __exit ena_cleanup(void) 3604 { 3605 pci_unregister_driver(&ena_pci_driver); 3606 3607 if (ena_wq) { 3608 destroy_workqueue(ena_wq); 3609 ena_wq = NULL; 3610 } 3611 } 3612 3613 /****************************************************************************** 3614 ******************************** AENQ Handlers ******************************* 3615 *****************************************************************************/ 3616 /* ena_update_on_link_change: 3617 * Notify the network interface about the change in link status 3618 */ 3619 static void ena_update_on_link_change(void *adapter_data, 3620 struct ena_admin_aenq_entry *aenq_e) 3621 { 3622 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3623 struct ena_admin_aenq_link_change_desc *aenq_desc = 3624 (struct ena_admin_aenq_link_change_desc *)aenq_e; 3625 int status = aenq_desc->flags & 3626 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 3627 3628 if (status) { 3629 netdev_dbg(adapter->netdev, "%s\n", __func__); 3630 set_bit(ENA_FLAG_LINK_UP, &adapter->flags); 3631 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) 3632 netif_carrier_on(adapter->netdev); 3633 } else { 3634 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); 3635 netif_carrier_off(adapter->netdev); 3636 } 3637 } 3638 3639 static void ena_keep_alive_wd(void *adapter_data, 3640 struct ena_admin_aenq_entry *aenq_e) 3641 { 3642 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3643 struct ena_admin_aenq_keep_alive_desc *desc; 3644 u64 rx_drops; 3645 3646 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 3647 adapter->last_keep_alive_jiffies = jiffies; 3648 3649 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; 3650 3651 u64_stats_update_begin(&adapter->syncp); 3652 adapter->dev_stats.rx_drops = rx_drops; 3653 u64_stats_update_end(&adapter->syncp); 3654 } 3655 3656 static void ena_notification(void *adapter_data, 3657 struct ena_admin_aenq_entry *aenq_e) 3658 { 3659 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3660 struct ena_admin_ena_hw_hints *hints; 3661 3662 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, 3663 "Invalid group(%x) expected %x\n", 3664 aenq_e->aenq_common_desc.group, 3665 ENA_ADMIN_NOTIFICATION); 3666 3667 switch (aenq_e->aenq_common_desc.syndrom) { 3668 case ENA_ADMIN_UPDATE_HINTS: 3669 hints = (struct ena_admin_ena_hw_hints *) 3670 (&aenq_e->inline_data_w4); 3671 ena_update_hints(adapter, hints); 3672 break; 3673 default: 3674 netif_err(adapter, drv, adapter->netdev, 3675 "Invalid aenq notification syndrome %d\n", 3676 aenq_e->aenq_common_desc.syndrom); 3677 } 3678 } 3679 3680 /* This handler will be called for an unknown event group or unimplemented handlers */ 3681 static void unimplemented_aenq_handler(void *data, 3682 struct ena_admin_aenq_entry *aenq_e) 3683 { 3684 struct ena_adapter *adapter = (struct ena_adapter *)data; 3685 3686 netif_err(adapter, drv, adapter->netdev, 3687 "Unknown event was received or event with unimplemented handler\n"); 3688 } 3689 3690 static struct ena_aenq_handlers aenq_handlers = { 3691 .handlers = { 3692 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 3693 [ENA_ADMIN_NOTIFICATION] = ena_notification, 3694 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, 3695 }, 3696 .unimplemented_handler = unimplemented_aenq_handler 3697 }; 3698 3699 module_init(ena_init); 3700
module_exit(ena_cleanup); 3701