1 /* 2 * Copyright 2015 Amazon.com, Inc. or its affiliates. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35 #ifdef CONFIG_RFS_ACCEL 36 #include <linux/cpu_rmap.h> 37 #endif /* CONFIG_RFS_ACCEL */ 38 #include <linux/ethtool.h> 39 #include <linux/if_vlan.h> 40 #include <linux/kernel.h> 41 #include <linux/module.h> 42 #include <linux/moduleparam.h> 43 #include <linux/numa.h> 44 #include <linux/pci.h> 45 #include <linux/utsname.h> 46 #include <linux/version.h> 47 #include <linux/vmalloc.h> 48 #include <net/ip.h> 49 50 #include "ena_netdev.h" 51 #include "ena_pci_id_tbl.h" 52 53 static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n"; 54 55 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates"); 56 MODULE_DESCRIPTION(DEVICE_NAME); 57 MODULE_LICENSE("GPL"); 58 MODULE_VERSION(DRV_MODULE_VERSION); 59 60 /* Time in jiffies before concluding the transmitter is hung. */ 61 #define TX_TIMEOUT (5 * HZ) 62 63 #define ENA_NAPI_BUDGET 64 64 65 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ 66 NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) 67 static int debug = -1; 68 module_param(debug, int, 0); 69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 70 71 static struct ena_aenq_handlers aenq_handlers; 72 73 static struct workqueue_struct *ena_wq; 74 75 MODULE_DEVICE_TABLE(pci, ena_pci_tbl); 76 77 static int ena_rss_init_default(struct ena_adapter *adapter); 78 static void check_for_admin_com_state(struct ena_adapter *adapter); 79 static void ena_destroy_device(struct ena_adapter *adapter); 80 static int ena_restore_device(struct ena_adapter *adapter); 81 82 static void ena_tx_timeout(struct net_device *dev) 83 { 84 struct ena_adapter *adapter = netdev_priv(dev); 85 86 /* Change the state of the device to trigger reset 87 * Check that we are not in the middle or a trigger already 88 */ 89 90 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 91 return; 92 93 adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD; 94 u64_stats_update_begin(&adapter->syncp); 95 adapter->dev_stats.tx_timeout++; 96 u64_stats_update_end(&adapter->syncp); 97 98 netif_err(adapter, tx_err, dev, "Transmit time out\n"); 99 } 100 101 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu) 102 { 103 int i; 104 105 for (i = 0; i < adapter->num_queues; i++) 106 adapter->rx_ring[i].mtu = mtu; 107 } 108 109 static int ena_change_mtu(struct net_device *dev, int new_mtu) 110 { 111 struct ena_adapter *adapter = netdev_priv(dev); 112 int ret; 113 114 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); 115 if (!ret) { 116 netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu); 117 update_rx_ring_mtu(adapter, new_mtu); 118 dev->mtu = new_mtu; 119 } else { 120 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n", 121 new_mtu); 122 } 123 124 return ret; 125 } 126 127 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) 128 { 129 #ifdef CONFIG_RFS_ACCEL 130 u32 i; 131 int rc; 132 133 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues); 134 if (!adapter->netdev->rx_cpu_rmap) 135 return -ENOMEM; 136 for (i = 0; i < adapter->num_queues; i++) { 137 int irq_idx = ENA_IO_IRQ_IDX(i); 138 139 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, 140 pci_irq_vector(adapter->pdev, irq_idx)); 141 if (rc) { 142 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); 143 adapter->netdev->rx_cpu_rmap = NULL; 144 return rc; 145 } 146 } 147 #endif /* CONFIG_RFS_ACCEL */ 148 return 0; 149 } 150 151 static void ena_init_io_rings_common(struct ena_adapter *adapter, 152 struct ena_ring *ring, u16 qid) 153 { 154 ring->qid = qid; 155 ring->pdev = adapter->pdev; 156 ring->dev = &adapter->pdev->dev; 157 ring->netdev = adapter->netdev; 158 ring->napi = &adapter->ena_napi[qid].napi; 159 ring->adapter = adapter; 160 ring->ena_dev = adapter->ena_dev; 161 ring->per_napi_packets = 0; 162 ring->per_napi_bytes = 0; 163 ring->cpu = 0; 164 u64_stats_init(&ring->syncp); 165 } 166 167 static void ena_init_io_rings(struct ena_adapter *adapter) 168 { 169 struct ena_com_dev *ena_dev; 170 struct ena_ring *txr, *rxr; 171 int i; 172 173 ena_dev = adapter->ena_dev; 174 175 for (i = 0; i < adapter->num_queues; i++) { 176 txr = &adapter->tx_ring[i]; 177 rxr = &adapter->rx_ring[i]; 178 179 /* TX/RX common ring state */ 180 ena_init_io_rings_common(adapter, txr, i); 181 ena_init_io_rings_common(adapter, rxr, i); 182 183 /* TX specific ring state */ 184 txr->ring_size = adapter->tx_ring_size; 185 txr->tx_max_header_size = ena_dev->tx_max_header_size; 186 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; 187 txr->sgl_size = adapter->max_tx_sgl_size; 188 txr->smoothed_interval = 189 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); 190 191 /* RX specific ring state */ 192 rxr->ring_size = adapter->rx_ring_size; 193 rxr->rx_copybreak = adapter->rx_copybreak; 194 rxr->sgl_size = adapter->max_rx_sgl_size; 195 rxr->smoothed_interval = 196 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); 197 rxr->empty_rx_queue = 0; 198 } 199 } 200 201 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors) 202 * @adapter: network interface device structure 203 * @qid: queue index 204 * 205 * Return 0 on success, negative on failure 206 */ 207 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) 208 { 209 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 210 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; 211 int size, i, node; 212 213 if (tx_ring->tx_buffer_info) { 214 netif_err(adapter, ifup, 215 adapter->netdev, "tx_buffer_info info is not NULL"); 216 return -EEXIST; 217 } 218 219 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; 220 node = cpu_to_node(ena_irq->cpu); 221 222 tx_ring->tx_buffer_info = vzalloc_node(size, node); 223 if (!tx_ring->tx_buffer_info) { 224 tx_ring->tx_buffer_info = vzalloc(size); 225 if (!tx_ring->tx_buffer_info) 226 return -ENOMEM; 227 } 228 229 size = sizeof(u16) * tx_ring->ring_size; 230 tx_ring->free_tx_ids = vzalloc_node(size, node); 231 if (!tx_ring->free_tx_ids) { 232 tx_ring->free_tx_ids = vzalloc(size); 233 if (!tx_ring->free_tx_ids) { 234 vfree(tx_ring->tx_buffer_info); 235 return -ENOMEM; 236 } 237 } 238 239 /* Req id ring for TX out of order completions */ 240 for (i = 0; i < tx_ring->ring_size; i++) 241 tx_ring->free_tx_ids[i] = i; 242 243 /* Reset tx statistics */ 244 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); 245 246 tx_ring->next_to_use = 0; 247 tx_ring->next_to_clean = 0; 248 tx_ring->cpu = ena_irq->cpu; 249 return 0; 250 } 251 252 /* ena_free_tx_resources - Free I/O Tx Resources per Queue 253 * @adapter: network interface device structure 254 * @qid: queue index 255 * 256 * Free all transmit software resources 257 */ 258 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) 259 { 260 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 261 262 vfree(tx_ring->tx_buffer_info); 263 tx_ring->tx_buffer_info = NULL; 264 265 vfree(tx_ring->free_tx_ids); 266 tx_ring->free_tx_ids = NULL; 267 } 268 269 /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues 270 * @adapter: private structure 271 * 272 * Return 0 on success, negative on failure 273 */ 274 static int ena_setup_all_tx_resources(struct ena_adapter *adapter) 275 { 276 int i, rc = 0; 277 278 for (i = 0; i < adapter->num_queues; i++) { 279 rc = ena_setup_tx_resources(adapter, i); 280 if (rc) 281 goto err_setup_tx; 282 } 283 284 return 0; 285 286 err_setup_tx: 287 288 netif_err(adapter, ifup, adapter->netdev, 289 "Tx queue %d: allocation failed\n", i); 290 291 /* rewind the index freeing the rings as we go */ 292 while (i--) 293 ena_free_tx_resources(adapter, i); 294 return rc; 295 } 296 297 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues 298 * @adapter: board private structure 299 * 300 * Free all transmit software resources 301 */ 302 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter) 303 { 304 int i; 305 306 for (i = 0; i < adapter->num_queues; i++) 307 ena_free_tx_resources(adapter, i); 308 } 309 310 static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id) 311 { 312 if (likely(req_id < rx_ring->ring_size)) 313 return 0; 314 315 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 316 "Invalid rx req_id: %hu\n", req_id); 317 318 u64_stats_update_begin(&rx_ring->syncp); 319 rx_ring->rx_stats.bad_req_id++; 320 u64_stats_update_end(&rx_ring->syncp); 321 322 /* Trigger device reset */ 323 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 324 set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags); 325 return -EFAULT; 326 } 327 328 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors) 329 * @adapter: network interface device structure 330 * @qid: queue index 331 * 332 * Returns 0 on success, negative on failure 333 */ 334 static int ena_setup_rx_resources(struct ena_adapter *adapter, 335 u32 qid) 336 { 337 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 338 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; 339 int size, node, i; 340 341 if (rx_ring->rx_buffer_info) { 342 netif_err(adapter, ifup, adapter->netdev, 343 "rx_buffer_info is not NULL"); 344 return -EEXIST; 345 } 346 347 /* alloc extra element so in rx path 348 * we can always prefetch rx_info + 1 349 */ 350 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); 351 node = cpu_to_node(ena_irq->cpu); 352 353 rx_ring->rx_buffer_info = vzalloc_node(size, node); 354 if (!rx_ring->rx_buffer_info) { 355 rx_ring->rx_buffer_info = vzalloc(size); 356 if (!rx_ring->rx_buffer_info) 357 return -ENOMEM; 358 } 359 360 size = sizeof(u16) * rx_ring->ring_size; 361 rx_ring->free_rx_ids = vzalloc_node(size, node); 362 if (!rx_ring->free_rx_ids) { 363 rx_ring->free_rx_ids = vzalloc(size); 364 if (!rx_ring->free_rx_ids) { 365 vfree(rx_ring->rx_buffer_info); 366 return -ENOMEM; 367 } 368 } 369 370 /* Req id ring for receiving RX pkts out of order */ 371 for (i = 0; i < rx_ring->ring_size; i++) 372 rx_ring->free_rx_ids[i] = i; 373 374 /* Reset rx statistics */ 375 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); 376 377 rx_ring->next_to_clean = 0; 378 rx_ring->next_to_use = 0; 379 rx_ring->cpu = ena_irq->cpu; 380 381 return 0; 382 } 383 384 /* ena_free_rx_resources - Free I/O Rx Resources 385 * @adapter: network interface device structure 386 * @qid: queue index 387 * 388 * Free all receive software resources 389 */ 390 static void ena_free_rx_resources(struct ena_adapter *adapter, 391 u32 qid) 392 { 393 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 394 395 vfree(rx_ring->rx_buffer_info); 396 rx_ring->rx_buffer_info = NULL; 397 398 vfree(rx_ring->free_rx_ids); 399 rx_ring->free_rx_ids = NULL; 400 } 401 402 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues 403 * @adapter: board private structure 404 * 405 * Return 0 on success, negative on failure 406 */ 407 static int ena_setup_all_rx_resources(struct ena_adapter *adapter) 408 { 409 int i, rc = 0; 410 411 for (i = 0; i < adapter->num_queues; i++) { 412 rc = ena_setup_rx_resources(adapter, i); 413 if (rc) 414 goto err_setup_rx; 415 } 416 417 return 0; 418 419 err_setup_rx: 420 421 netif_err(adapter, ifup, adapter->netdev, 422 "Rx queue %d: allocation failed\n", i); 423 424 /* rewind the index freeing the rings as we go */ 425 while (i--) 426 ena_free_rx_resources(adapter, i); 427 return rc; 428 } 429 430 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues 431 * @adapter: board private structure 432 * 433 * Free all receive software resources 434 */ 435 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter) 436 { 437 int i; 438 439 for (i = 0; i < adapter->num_queues; i++) 440 ena_free_rx_resources(adapter, i); 441 } 442 443 static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, 444 struct ena_rx_buffer *rx_info, gfp_t gfp) 445 { 446 struct ena_com_buf *ena_buf; 447 struct page *page; 448 dma_addr_t dma; 449 450 /* if previous allocated page is not used */ 451 if (unlikely(rx_info->page)) 452 return 0; 453 454 page = alloc_page(gfp); 455 if (unlikely(!page)) { 456 u64_stats_update_begin(&rx_ring->syncp); 457 rx_ring->rx_stats.page_alloc_fail++; 458 u64_stats_update_end(&rx_ring->syncp); 459 return -ENOMEM; 460 } 461 462 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, 463 DMA_FROM_DEVICE); 464 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { 465 u64_stats_update_begin(&rx_ring->syncp); 466 rx_ring->rx_stats.dma_mapping_err++; 467 u64_stats_update_end(&rx_ring->syncp); 468 469 __free_page(page); 470 return -EIO; 471 } 472 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 473 "alloc page %p, rx_info %p\n", page, rx_info); 474 475 rx_info->page = page; 476 rx_info->page_offset = 0; 477 ena_buf = &rx_info->ena_buf; 478 ena_buf->paddr = dma; 479 ena_buf->len = PAGE_SIZE; 480 481 return 0; 482 } 483 484 static void ena_free_rx_page(struct ena_ring *rx_ring, 485 struct ena_rx_buffer *rx_info) 486 { 487 struct page *page = rx_info->page; 488 struct ena_com_buf *ena_buf = &rx_info->ena_buf; 489 490 if (unlikely(!page)) { 491 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, 492 "Trying to free unallocated buffer\n"); 493 return; 494 } 495 496 dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, 497 DMA_FROM_DEVICE); 498 499 __free_page(page); 500 rx_info->page = NULL; 501 } 502 503 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) 504 { 505 u16 next_to_use, req_id; 506 u32 i; 507 int rc; 508 509 next_to_use = rx_ring->next_to_use; 510 511 for (i = 0; i < num; i++) { 512 struct ena_rx_buffer *rx_info; 513 514 req_id = rx_ring->free_rx_ids[next_to_use]; 515 rc = validate_rx_req_id(rx_ring, req_id); 516 if (unlikely(rc < 0)) 517 break; 518 519 rx_info = &rx_ring->rx_buffer_info[req_id]; 520 521 522 rc = ena_alloc_rx_page(rx_ring, rx_info, 523 GFP_ATOMIC | __GFP_COMP); 524 if (unlikely(rc < 0)) { 525 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, 526 "failed to alloc buffer for rx queue %d\n", 527 rx_ring->qid); 528 break; 529 } 530 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 531 &rx_info->ena_buf, 532 req_id); 533 if (unlikely(rc)) { 534 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, 535 "failed to add buffer for rx queue %d\n", 536 rx_ring->qid); 537 break; 538 } 539 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 540 rx_ring->ring_size); 541 } 542 543 if (unlikely(i < num)) { 544 u64_stats_update_begin(&rx_ring->syncp); 545 rx_ring->rx_stats.refil_partial++; 546 u64_stats_update_end(&rx_ring->syncp); 547 netdev_warn(rx_ring->netdev, 548 "refilled rx qid %d with only %d buffers (from %d)\n", 549 rx_ring->qid, i, num); 550 } 551 552 if (likely(i)) { 553 /* Add memory barrier to make sure the desc were written before 554 * issue a doorbell 555 */ 556 wmb(); 557 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 558 } 559 560 rx_ring->next_to_use = next_to_use; 561 562 return i; 563 } 564 565 static void ena_free_rx_bufs(struct ena_adapter *adapter, 566 u32 qid) 567 { 568 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 569 u32 i; 570 571 for (i = 0; i < rx_ring->ring_size; i++) { 572 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 573 574 if (rx_info->page) 575 ena_free_rx_page(rx_ring, rx_info); 576 } 577 } 578 579 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers 580 * @adapter: board private structure 581 * 582 */ 583 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter) 584 { 585 struct ena_ring *rx_ring; 586 int i, rc, bufs_num; 587 588 for (i = 0; i < adapter->num_queues; i++) { 589 rx_ring = &adapter->rx_ring[i]; 590 bufs_num = rx_ring->ring_size - 1; 591 rc = ena_refill_rx_bufs(rx_ring, bufs_num); 592 593 if (unlikely(rc != bufs_num)) 594 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, 595 "refilling Queue %d failed. allocated %d buffers from: %d\n", 596 i, rc, bufs_num); 597 } 598 } 599 600 static void ena_free_all_rx_bufs(struct ena_adapter *adapter) 601 { 602 int i; 603 604 for (i = 0; i < adapter->num_queues; i++) 605 ena_free_rx_bufs(adapter, i); 606 } 607 608 /* ena_free_tx_bufs - Free Tx Buffers per Queue 609 * @tx_ring: TX ring for which buffers be freed 610 */ 611 static void ena_free_tx_bufs(struct ena_ring *tx_ring) 612 { 613 bool print_once = true; 614 u32 i; 615 616 for (i = 0; i < tx_ring->ring_size; i++) { 617 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 618 struct ena_com_buf *ena_buf; 619 int nr_frags; 620 int j; 621 622 if (!tx_info->skb) 623 continue; 624 625 if (print_once) { 626 netdev_notice(tx_ring->netdev, 627 "free uncompleted tx skb qid %d idx 0x%x\n", 628 tx_ring->qid, i); 629 print_once = false; 630 } else { 631 netdev_dbg(tx_ring->netdev, 632 "free uncompleted tx skb qid %d idx 0x%x\n", 633 tx_ring->qid, i); 634 } 635 636 ena_buf = tx_info->bufs; 637 dma_unmap_single(tx_ring->dev, 638 ena_buf->paddr, 639 ena_buf->len, 640 DMA_TO_DEVICE); 641 642 /* unmap remaining mapped pages */ 643 nr_frags = tx_info->num_of_bufs - 1; 644 for (j = 0; j < nr_frags; j++) { 645 ena_buf++; 646 dma_unmap_page(tx_ring->dev, 647 ena_buf->paddr, 648 ena_buf->len, 649 DMA_TO_DEVICE); 650 } 651 652 dev_kfree_skb_any(tx_info->skb); 653 } 654 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, 655 tx_ring->qid)); 656 } 657 658 static void ena_free_all_tx_bufs(struct ena_adapter *adapter) 659 { 660 struct ena_ring *tx_ring; 661 int i; 662 663 for (i = 0; i < adapter->num_queues; i++) { 664 tx_ring = &adapter->tx_ring[i]; 665 ena_free_tx_bufs(tx_ring); 666 } 667 } 668 669 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) 670 { 671 u16 ena_qid; 672 int i; 673 674 for (i = 0; i < adapter->num_queues; i++) { 675 ena_qid = ENA_IO_TXQ_IDX(i); 676 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 677 } 678 } 679 680 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) 681 { 682 u16 ena_qid; 683 int i; 684 685 for (i = 0; i < adapter->num_queues; i++) { 686 ena_qid = ENA_IO_RXQ_IDX(i); 687 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 688 } 689 } 690 691 static void ena_destroy_all_io_queues(struct ena_adapter *adapter) 692 { 693 ena_destroy_all_tx_queues(adapter); 694 ena_destroy_all_rx_queues(adapter); 695 } 696 697 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 698 { 699 struct ena_tx_buffer *tx_info = NULL; 700 701 if (likely(req_id < tx_ring->ring_size)) { 702 tx_info = &tx_ring->tx_buffer_info[req_id]; 703 if (likely(tx_info->skb)) 704 return 0; 705 } 706 707 if (tx_info) 708 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, 709 "tx_info doesn't have valid skb\n"); 710 else 711 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, 712 "Invalid req_id: %hu\n", req_id); 713 714 u64_stats_update_begin(&tx_ring->syncp); 715 tx_ring->tx_stats.bad_req_id++; 716 u64_stats_update_end(&tx_ring->syncp); 717 718 /* Trigger device reset */ 719 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 720 set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags); 721 return -EFAULT; 722 } 723 724 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) 725 { 726 struct netdev_queue *txq; 727 bool above_thresh; 728 u32 tx_bytes = 0; 729 u32 total_done = 0; 730 u16 next_to_clean; 731 u16 req_id; 732 int tx_pkts = 0; 733 int rc; 734 735 next_to_clean = tx_ring->next_to_clean; 736 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); 737 738 while (tx_pkts < budget) { 739 struct ena_tx_buffer *tx_info; 740 struct sk_buff *skb; 741 struct ena_com_buf *ena_buf; 742 int i, nr_frags; 743 744 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, 745 &req_id); 746 if (rc) 747 break; 748 749 rc = validate_tx_req_id(tx_ring, req_id); 750 if (rc) 751 break; 752 753 tx_info = &tx_ring->tx_buffer_info[req_id]; 754 skb = tx_info->skb; 755 756 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ 757 prefetch(&skb->end); 758 759 tx_info->skb = NULL; 760 tx_info->last_jiffies = 0; 761 762 if (likely(tx_info->num_of_bufs != 0)) { 763 ena_buf = tx_info->bufs; 764 765 dma_unmap_single(tx_ring->dev, 766 dma_unmap_addr(ena_buf, paddr), 767 dma_unmap_len(ena_buf, len), 768 DMA_TO_DEVICE); 769 770 /* unmap remaining mapped pages */ 771 nr_frags = tx_info->num_of_bufs - 1; 772 for (i = 0; i < nr_frags; i++) { 773 ena_buf++; 774 dma_unmap_page(tx_ring->dev, 775 dma_unmap_addr(ena_buf, paddr), 776 dma_unmap_len(ena_buf, len), 777 DMA_TO_DEVICE); 778 } 779 } 780 781 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, 782 "tx_poll: q %d skb %p completed\n", tx_ring->qid, 783 skb); 784 785 tx_bytes += skb->len; 786 dev_kfree_skb(skb); 787 tx_pkts++; 788 total_done += tx_info->tx_descs; 789 790 tx_ring->free_tx_ids[next_to_clean] = req_id; 791 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, 792 tx_ring->ring_size); 793 } 794 795 tx_ring->next_to_clean = next_to_clean; 796 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); 797 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 798 799 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); 800 801 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, 802 "tx_poll: q %d done. total pkts: %d\n", 803 tx_ring->qid, tx_pkts); 804 805 /* need to make the rings circular update visible to 806 * ena_start_xmit() before checking for netif_queue_stopped(). 807 */ 808 smp_mb(); 809 810 above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > 811 ENA_TX_WAKEUP_THRESH; 812 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { 813 __netif_tx_lock(txq, smp_processor_id()); 814 above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > 815 ENA_TX_WAKEUP_THRESH; 816 if (netif_tx_queue_stopped(txq) && above_thresh) { 817 netif_tx_wake_queue(txq); 818 u64_stats_update_begin(&tx_ring->syncp); 819 tx_ring->tx_stats.queue_wakeup++; 820 u64_stats_update_end(&tx_ring->syncp); 821 } 822 __netif_tx_unlock(txq); 823 } 824 825 tx_ring->per_napi_bytes += tx_bytes; 826 tx_ring->per_napi_packets += tx_pkts; 827 828 return tx_pkts; 829 } 830 831 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags) 832 { 833 struct sk_buff *skb; 834 835 if (frags) 836 skb = napi_get_frags(rx_ring->napi); 837 else 838 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 839 rx_ring->rx_copybreak); 840 841 if (unlikely(!skb)) { 842 u64_stats_update_begin(&rx_ring->syncp); 843 rx_ring->rx_stats.skb_alloc_fail++; 844 u64_stats_update_end(&rx_ring->syncp); 845 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, 846 "Failed to allocate skb. frags: %d\n", frags); 847 return NULL; 848 } 849 850 return skb; 851 } 852 853 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, 854 struct ena_com_rx_buf_info *ena_bufs, 855 u32 descs, 856 u16 *next_to_clean) 857 { 858 struct sk_buff *skb; 859 struct ena_rx_buffer *rx_info; 860 u16 len, req_id, buf = 0; 861 void *va; 862 863 len = ena_bufs[buf].len; 864 req_id = ena_bufs[buf].req_id; 865 rx_info = &rx_ring->rx_buffer_info[req_id]; 866 867 if (unlikely(!rx_info->page)) { 868 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 869 "Page is NULL\n"); 870 return NULL; 871 } 872 873 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 874 "rx_info %p page %p\n", 875 rx_info, rx_info->page); 876 877 /* save virt address of first buffer */ 878 va = page_address(rx_info->page) + rx_info->page_offset; 879 prefetch(va + NET_IP_ALIGN); 880 881 if (len <= rx_ring->rx_copybreak) { 882 skb = ena_alloc_skb(rx_ring, false); 883 if (unlikely(!skb)) 884 return NULL; 885 886 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 887 "rx allocated small packet. len %d. data_len %d\n", 888 skb->len, skb->data_len); 889 890 /* sync this buffer for CPU use */ 891 dma_sync_single_for_cpu(rx_ring->dev, 892 dma_unmap_addr(&rx_info->ena_buf, paddr), 893 len, 894 DMA_FROM_DEVICE); 895 skb_copy_to_linear_data(skb, va, len); 896 dma_sync_single_for_device(rx_ring->dev, 897 dma_unmap_addr(&rx_info->ena_buf, paddr), 898 len, 899 DMA_FROM_DEVICE); 900 901 skb_put(skb, len); 902 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 903 rx_ring->free_rx_ids[*next_to_clean] = req_id; 904 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs, 905 rx_ring->ring_size); 906 return skb; 907 } 908 909 skb = ena_alloc_skb(rx_ring, true); 910 if (unlikely(!skb)) 911 return NULL; 912 913 do { 914 dma_unmap_page(rx_ring->dev, 915 dma_unmap_addr(&rx_info->ena_buf, paddr), 916 PAGE_SIZE, DMA_FROM_DEVICE); 917 918 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, 919 rx_info->page_offset, len, PAGE_SIZE); 920 921 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 922 "rx skb updated. len %d. data_len %d\n", 923 skb->len, skb->data_len); 924 925 rx_info->page = NULL; 926 927 rx_ring->free_rx_ids[*next_to_clean] = req_id; 928 *next_to_clean = 929 ENA_RX_RING_IDX_NEXT(*next_to_clean, 930 rx_ring->ring_size); 931 if (likely(--descs == 0)) 932 break; 933 934 buf++; 935 len = ena_bufs[buf].len; 936 req_id = ena_bufs[buf].req_id; 937 rx_info = &rx_ring->rx_buffer_info[req_id]; 938 } while (1); 939 940 return skb; 941 } 942 943 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum 944 * @adapter: structure containing adapter specific data 945 * @ena_rx_ctx: received packet context/metadata 946 * @skb: skb currently being received and modified 947 */ 948 static inline void ena_rx_checksum(struct ena_ring *rx_ring, 949 struct ena_com_rx_ctx *ena_rx_ctx, 950 struct sk_buff *skb) 951 { 952 /* Rx csum disabled */ 953 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { 954 skb->ip_summed = CHECKSUM_NONE; 955 return; 956 } 957 958 /* For fragmented packets the checksum isn't valid */ 959 if (ena_rx_ctx->frag) { 960 skb->ip_summed = CHECKSUM_NONE; 961 return; 962 } 963 964 /* if IP and error */ 965 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && 966 (ena_rx_ctx->l3_csum_err))) { 967 /* ipv4 checksum error */ 968 skb->ip_summed = CHECKSUM_NONE; 969 u64_stats_update_begin(&rx_ring->syncp); 970 rx_ring->rx_stats.bad_csum++; 971 u64_stats_update_end(&rx_ring->syncp); 972 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, 973 "RX IPv4 header checksum error\n"); 974 return; 975 } 976 977 /* if TCP/UDP */ 978 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 979 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { 980 if (unlikely(ena_rx_ctx->l4_csum_err)) { 981 /* TCP/UDP checksum error */ 982 u64_stats_update_begin(&rx_ring->syncp); 983 rx_ring->rx_stats.bad_csum++; 984 u64_stats_update_end(&rx_ring->syncp); 985 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, 986 "RX L4 checksum error\n"); 987 skb->ip_summed = CHECKSUM_NONE; 988 return; 989 } 990 991 skb->ip_summed = CHECKSUM_UNNECESSARY; 992 } 993 } 994 995 static void ena_set_rx_hash(struct ena_ring *rx_ring, 996 struct ena_com_rx_ctx *ena_rx_ctx, 997 struct sk_buff *skb) 998 { 999 enum pkt_hash_types hash_type; 1000 1001 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { 1002 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 1003 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) 1004 1005 hash_type = PKT_HASH_TYPE_L4; 1006 else 1007 hash_type = PKT_HASH_TYPE_NONE; 1008 1009 /* Override hash type if the packet is fragmented */ 1010 if (ena_rx_ctx->frag) 1011 hash_type = PKT_HASH_TYPE_NONE; 1012 1013 skb_set_hash(skb, ena_rx_ctx->hash, hash_type); 1014 } 1015 } 1016 1017 /* ena_clean_rx_irq - Cleanup RX irq 1018 * @rx_ring: RX ring to clean 1019 * @napi: napi handler 1020 * @budget: how many packets driver is allowed to clean 1021 * 1022 * Returns the number of cleaned buffers. 1023 */ 1024 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, 1025 u32 budget) 1026 { 1027 u16 next_to_clean = rx_ring->next_to_clean; 1028 u32 res_budget, work_done; 1029 1030 struct ena_com_rx_ctx ena_rx_ctx; 1031 struct ena_adapter *adapter; 1032 struct sk_buff *skb; 1033 int refill_required; 1034 int refill_threshold; 1035 int rc = 0; 1036 int total_len = 0; 1037 int rx_copybreak_pkt = 0; 1038 int i; 1039 1040 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1041 "%s qid %d\n", __func__, rx_ring->qid); 1042 res_budget = budget; 1043 1044 do { 1045 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 1046 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 1047 ena_rx_ctx.descs = 0; 1048 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 1049 rx_ring->ena_com_io_sq, 1050 &ena_rx_ctx); 1051 if (unlikely(rc)) 1052 goto error; 1053 1054 if (unlikely(ena_rx_ctx.descs == 0)) 1055 break; 1056 1057 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1058 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", 1059 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, 1060 ena_rx_ctx.l4_proto, ena_rx_ctx.hash); 1061 1062 /* allocate skb and fill it */ 1063 skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs, 1064 &next_to_clean); 1065 1066 /* exit if we failed to retrieve a buffer */ 1067 if (unlikely(!skb)) { 1068 for (i = 0; i < ena_rx_ctx.descs; i++) { 1069 rx_ring->free_tx_ids[next_to_clean] = 1070 rx_ring->ena_bufs[i].req_id; 1071 next_to_clean = 1072 ENA_RX_RING_IDX_NEXT(next_to_clean, 1073 rx_ring->ring_size); 1074 } 1075 break; 1076 } 1077 1078 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); 1079 1080 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); 1081 1082 skb_record_rx_queue(skb, rx_ring->qid); 1083 1084 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { 1085 total_len += rx_ring->ena_bufs[0].len; 1086 rx_copybreak_pkt++; 1087 napi_gro_receive(napi, skb); 1088 } else { 1089 total_len += skb->len; 1090 napi_gro_frags(napi); 1091 } 1092 1093 res_budget--; 1094 } while (likely(res_budget)); 1095 1096 work_done = budget - res_budget; 1097 rx_ring->per_napi_bytes += total_len; 1098 rx_ring->per_napi_packets += work_done; 1099 u64_stats_update_begin(&rx_ring->syncp); 1100 rx_ring->rx_stats.bytes += total_len; 1101 rx_ring->rx_stats.cnt += work_done; 1102 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; 1103 u64_stats_update_end(&rx_ring->syncp); 1104 1105 rx_ring->next_to_clean = next_to_clean; 1106 1107 refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); 1108 refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; 1109 1110 /* Optimization, try to batch new rx buffers */ 1111 if (refill_required > refill_threshold) { 1112 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 1113 ena_refill_rx_bufs(rx_ring, refill_required); 1114 } 1115 1116 return work_done; 1117 1118 error: 1119 adapter = netdev_priv(rx_ring->netdev); 1120 1121 u64_stats_update_begin(&rx_ring->syncp); 1122 rx_ring->rx_stats.bad_desc_num++; 1123 u64_stats_update_end(&rx_ring->syncp); 1124 1125 /* Too many desc from the device. Trigger reset */ 1126 adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; 1127 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 1128 1129 return 0; 1130 } 1131 1132 inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, 1133 struct ena_ring *tx_ring) 1134 { 1135 /* We apply adaptive moderation on Rx path only. 1136 * Tx uses static interrupt moderation. 1137 */ 1138 ena_com_calculate_interrupt_delay(rx_ring->ena_dev, 1139 rx_ring->per_napi_packets, 1140 rx_ring->per_napi_bytes, 1141 &rx_ring->smoothed_interval, 1142 &rx_ring->moder_tbl_idx); 1143 1144 /* Reset per napi packets/bytes */ 1145 tx_ring->per_napi_packets = 0; 1146 tx_ring->per_napi_bytes = 0; 1147 rx_ring->per_napi_packets = 0; 1148 rx_ring->per_napi_bytes = 0; 1149 } 1150 1151 static inline void ena_unmask_interrupt(struct ena_ring *tx_ring, 1152 struct ena_ring *rx_ring) 1153 { 1154 struct ena_eth_io_intr_reg intr_reg; 1155 1156 /* Update intr register: rx intr delay, 1157 * tx intr delay and interrupt unmask 1158 */ 1159 ena_com_update_intr_reg(&intr_reg, 1160 rx_ring->smoothed_interval, 1161 tx_ring->smoothed_interval, 1162 true); 1163 1164 /* It is a shared MSI-X. 1165 * Tx and Rx CQ have pointer to it. 1166 * So we use one of them to reach the intr reg 1167 */ 1168 ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); 1169 } 1170 1171 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, 1172 struct ena_ring *rx_ring) 1173 { 1174 int cpu = get_cpu(); 1175 int numa_node; 1176 1177 /* Check only one ring since the 2 rings are running on the same cpu */ 1178 if (likely(tx_ring->cpu == cpu)) 1179 goto out; 1180 1181 numa_node = cpu_to_node(cpu); 1182 put_cpu(); 1183 1184 if (numa_node != NUMA_NO_NODE) { 1185 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); 1186 ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node); 1187 } 1188 1189 tx_ring->cpu = cpu; 1190 rx_ring->cpu = cpu; 1191 1192 return; 1193 out: 1194 put_cpu(); 1195 } 1196 1197 static int ena_io_poll(struct napi_struct *napi, int budget) 1198 { 1199 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); 1200 struct ena_ring *tx_ring, *rx_ring; 1201 1202 u32 tx_work_done; 1203 u32 rx_work_done; 1204 int tx_budget; 1205 int napi_comp_call = 0; 1206 int ret; 1207 1208 tx_ring = ena_napi->tx_ring; 1209 rx_ring = ena_napi->rx_ring; 1210 1211 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; 1212 1213 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || 1214 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { 1215 napi_complete_done(napi, 0); 1216 return 0; 1217 } 1218 1219 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); 1220 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); 1221 1222 /* If the device is about to reset or down, avoid unmask 1223 * the interrupt and return 0 so NAPI won't reschedule 1224 */ 1225 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || 1226 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { 1227 napi_complete_done(napi, 0); 1228 ret = 0; 1229 1230 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { 1231 napi_comp_call = 1; 1232 1233 /* Update numa and unmask the interrupt only when schedule 1234 * from the interrupt context (vs from sk_busy_loop) 1235 */ 1236 if (napi_complete_done(napi, rx_work_done)) { 1237 /* Tx and Rx share the same interrupt vector */ 1238 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) 1239 ena_adjust_intr_moderation(rx_ring, tx_ring); 1240 1241 ena_unmask_interrupt(tx_ring, rx_ring); 1242 } 1243 1244 ena_update_ring_numa_node(tx_ring, rx_ring); 1245 1246 ret = rx_work_done; 1247 } else { 1248 ret = budget; 1249 } 1250 1251 u64_stats_update_begin(&tx_ring->syncp); 1252 tx_ring->tx_stats.napi_comp += napi_comp_call; 1253 tx_ring->tx_stats.tx_poll++; 1254 u64_stats_update_end(&tx_ring->syncp); 1255 1256 return ret; 1257 } 1258 1259 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data) 1260 { 1261 struct ena_adapter *adapter = (struct ena_adapter *)data; 1262 1263 ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1264 1265 /* Don't call the aenq handler before probe is done */ 1266 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) 1267 ena_com_aenq_intr_handler(adapter->ena_dev, data); 1268 1269 return IRQ_HANDLED; 1270 } 1271 1272 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx 1273 * @irq: interrupt number 1274 * @data: pointer to a network interface private napi device structure 1275 */ 1276 static irqreturn_t ena_intr_msix_io(int irq, void *data) 1277 { 1278 struct ena_napi *ena_napi = data; 1279 1280 napi_schedule_irqoff(&ena_napi->napi); 1281 1282 return IRQ_HANDLED; 1283 } 1284 1285 /* Reserve a single MSI-X vector for management (admin + aenq). 1286 * plus reserve one vector for each potential io queue. 1287 * the number of potential io queues is the minimum of what the device 1288 * supports and the number of vCPUs. 1289 */ 1290 static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) 1291 { 1292 int msix_vecs, irq_cnt; 1293 1294 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { 1295 netif_err(adapter, probe, adapter->netdev, 1296 "Error, MSI-X is already enabled\n"); 1297 return -EPERM; 1298 } 1299 1300 /* Reserved the max msix vectors we might need */ 1301 msix_vecs = ENA_MAX_MSIX_VEC(num_queues); 1302 1303 netif_dbg(adapter, probe, adapter->netdev, 1304 "trying to enable MSI-X, vectors %d\n", msix_vecs); 1305 1306 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC, 1307 msix_vecs, PCI_IRQ_MSIX); 1308 1309 if (irq_cnt < 0) { 1310 netif_err(adapter, probe, adapter->netdev, 1311 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt); 1312 return -ENOSPC; 1313 } 1314 1315 if (irq_cnt != msix_vecs) { 1316 netif_notice(adapter, probe, adapter->netdev, 1317 "enable only %d MSI-X (out of %d), reduce the number of queues\n", 1318 irq_cnt, msix_vecs); 1319 adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; 1320 } 1321 1322 if (ena_init_rx_cpu_rmap(adapter)) 1323 netif_warn(adapter, probe, adapter->netdev, 1324 "Failed to map IRQs to CPUs\n"); 1325 1326 adapter->msix_vecs = irq_cnt; 1327 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); 1328 1329 return 0; 1330 } 1331 1332 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) 1333 { 1334 u32 cpu; 1335 1336 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 1337 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 1338 pci_name(adapter->pdev)); 1339 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = 1340 ena_intr_msix_mgmnt; 1341 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 1342 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 1343 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); 1344 cpu = cpumask_first(cpu_online_mask); 1345 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; 1346 cpumask_set_cpu(cpu, 1347 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); 1348 } 1349 1350 static void ena_setup_io_intr(struct ena_adapter *adapter) 1351 { 1352 struct net_device *netdev; 1353 int irq_idx, i, cpu; 1354 1355 netdev = adapter->netdev; 1356 1357 for (i = 0; i < adapter->num_queues; i++) { 1358 irq_idx = ENA_IO_IRQ_IDX(i); 1359 cpu = i % num_online_cpus(); 1360 1361 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 1362 "%s-Tx-Rx-%d", netdev->name, i); 1363 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; 1364 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; 1365 adapter->irq_tbl[irq_idx].vector = 1366 pci_irq_vector(adapter->pdev, irq_idx); 1367 adapter->irq_tbl[irq_idx].cpu = cpu; 1368 1369 cpumask_set_cpu(cpu, 1370 &adapter->irq_tbl[irq_idx].affinity_hint_mask); 1371 } 1372 } 1373 1374 static int ena_request_mgmnt_irq(struct ena_adapter *adapter) 1375 { 1376 unsigned long flags = 0; 1377 struct ena_irq *irq; 1378 int rc; 1379 1380 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 1381 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 1382 irq->data); 1383 if (rc) { 1384 netif_err(adapter, probe, adapter->netdev, 1385 "failed to request admin irq\n"); 1386 return rc; 1387 } 1388 1389 netif_dbg(adapter, probe, adapter->netdev, 1390 "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n", 1391 irq->affinity_hint_mask.bits[0], irq->vector); 1392 1393 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); 1394 1395 return rc; 1396 } 1397 1398 static int ena_request_io_irq(struct ena_adapter *adapter) 1399 { 1400 unsigned long flags = 0; 1401 struct ena_irq *irq; 1402 int rc = 0, i, k; 1403 1404 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { 1405 netif_err(adapter, ifup, adapter->netdev, 1406 "Failed to request I/O IRQ: MSI-X is not enabled\n"); 1407 return -EINVAL; 1408 } 1409 1410 for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 1411 irq = &adapter->irq_tbl[i]; 1412 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 1413 irq->data); 1414 if (rc) { 1415 netif_err(adapter, ifup, adapter->netdev, 1416 "Failed to request I/O IRQ. index %d rc %d\n", 1417 i, rc); 1418 goto err; 1419 } 1420 1421 netif_dbg(adapter, ifup, adapter->netdev, 1422 "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n", 1423 i, irq->affinity_hint_mask.bits[0], irq->vector); 1424 1425 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); 1426 } 1427 1428 return rc; 1429 1430 err: 1431 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { 1432 irq = &adapter->irq_tbl[k]; 1433 free_irq(irq->vector, irq->data); 1434 } 1435 1436 return rc; 1437 } 1438 1439 static void ena_free_mgmnt_irq(struct ena_adapter *adapter) 1440 { 1441 struct ena_irq *irq; 1442 1443 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 1444 synchronize_irq(irq->vector); 1445 irq_set_affinity_hint(irq->vector, NULL); 1446 free_irq(irq->vector, irq->data); 1447 } 1448 1449 static void ena_free_io_irq(struct ena_adapter *adapter) 1450 { 1451 struct ena_irq *irq; 1452 int i; 1453 1454 #ifdef CONFIG_RFS_ACCEL 1455 if (adapter->msix_vecs >= 1) { 1456 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); 1457 adapter->netdev->rx_cpu_rmap = NULL; 1458 } 1459 #endif /* CONFIG_RFS_ACCEL */ 1460 1461 for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 1462 irq = &adapter->irq_tbl[i]; 1463 irq_set_affinity_hint(irq->vector, NULL); 1464 free_irq(irq->vector, irq->data); 1465 } 1466 } 1467 1468 static void ena_disable_msix(struct ena_adapter *adapter) 1469 { 1470 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) 1471 pci_free_irq_vectors(adapter->pdev); 1472 } 1473 1474 static void ena_disable_io_intr_sync(struct ena_adapter *adapter) 1475 { 1476 int i; 1477 1478 if (!netif_running(adapter->netdev)) 1479 return; 1480 1481 for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) 1482 synchronize_irq(adapter->irq_tbl[i].vector); 1483 } 1484 1485 static void ena_del_napi(struct ena_adapter *adapter) 1486 { 1487 int i; 1488 1489 for (i = 0; i < adapter->num_queues; i++) 1490 netif_napi_del(&adapter->ena_napi[i].napi); 1491 } 1492 1493 static void ena_init_napi(struct ena_adapter *adapter) 1494 { 1495 struct ena_napi *napi; 1496 int i; 1497 1498 for (i = 0; i < adapter->num_queues; i++) { 1499 napi = &adapter->ena_napi[i]; 1500 1501 netif_napi_add(adapter->netdev, 1502 &adapter->ena_napi[i].napi, 1503 ena_io_poll, 1504 ENA_NAPI_BUDGET); 1505 napi->rx_ring = &adapter->rx_ring[i]; 1506 napi->tx_ring = &adapter->tx_ring[i]; 1507 napi->qid = i; 1508 } 1509 } 1510 1511 static void ena_napi_disable_all(struct ena_adapter *adapter) 1512 { 1513 int i; 1514 1515 for (i = 0; i < adapter->num_queues; i++) 1516 napi_disable(&adapter->ena_napi[i].napi); 1517 } 1518 1519 static void ena_napi_enable_all(struct ena_adapter *adapter) 1520 { 1521 int i; 1522 1523 for (i = 0; i < adapter->num_queues; i++) 1524 napi_enable(&adapter->ena_napi[i].napi); 1525 } 1526 1527 static void ena_restore_ethtool_params(struct ena_adapter *adapter) 1528 { 1529 adapter->tx_usecs = 0; 1530 adapter->rx_usecs = 0; 1531 adapter->tx_frames = 1; 1532 adapter->rx_frames = 1; 1533 } 1534 1535 /* Configure the Rx forwarding */ 1536 static int ena_rss_configure(struct ena_adapter *adapter) 1537 { 1538 struct ena_com_dev *ena_dev = adapter->ena_dev; 1539 int rc; 1540 1541 /* In case the RSS table wasn't initialized by probe */ 1542 if (!ena_dev->rss.tbl_log_size) { 1543 rc = ena_rss_init_default(adapter); 1544 if (rc && (rc != -EOPNOTSUPP)) { 1545 netif_err(adapter, ifup, adapter->netdev, 1546 "Failed to init RSS rc: %d\n", rc); 1547 return rc; 1548 } 1549 } 1550 1551 /* Set indirect table */ 1552 rc = ena_com_indirect_table_set(ena_dev); 1553 if (unlikely(rc && rc != -EOPNOTSUPP)) 1554 return rc; 1555 1556 /* Configure hash function (if supported) */ 1557 rc = ena_com_set_hash_function(ena_dev); 1558 if (unlikely(rc && (rc != -EOPNOTSUPP))) 1559 return rc; 1560 1561 /* Configure hash inputs (if supported) */ 1562 rc = ena_com_set_hash_ctrl(ena_dev); 1563 if (unlikely(rc && (rc != -EOPNOTSUPP))) 1564 return rc; 1565 1566 return 0; 1567 } 1568 1569 static int ena_up_complete(struct ena_adapter *adapter) 1570 { 1571 int rc; 1572 1573 rc = ena_rss_configure(adapter); 1574 if (rc) 1575 return rc; 1576 1577 ena_init_napi(adapter); 1578 1579 ena_change_mtu(adapter->netdev, adapter->netdev->mtu); 1580 1581 ena_refill_all_rx_bufs(adapter); 1582 1583 /* enable transmits */ 1584 netif_tx_start_all_queues(adapter->netdev); 1585 1586 ena_restore_ethtool_params(adapter); 1587 1588 ena_napi_enable_all(adapter); 1589 1590 return 0; 1591 } 1592 1593 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) 1594 { 1595 struct ena_com_create_io_ctx ctx = { 0 }; 1596 struct ena_com_dev *ena_dev; 1597 struct ena_ring *tx_ring; 1598 u32 msix_vector; 1599 u16 ena_qid; 1600 int rc; 1601 1602 ena_dev = adapter->ena_dev; 1603 1604 tx_ring = &adapter->tx_ring[qid]; 1605 msix_vector = ENA_IO_IRQ_IDX(qid); 1606 ena_qid = ENA_IO_TXQ_IDX(qid); 1607 1608 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1609 ctx.qid = ena_qid; 1610 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1611 ctx.msix_vector = msix_vector; 1612 ctx.queue_size = adapter->tx_ring_size; 1613 ctx.numa_node = cpu_to_node(tx_ring->cpu); 1614 1615 rc = ena_com_create_io_queue(ena_dev, &ctx); 1616 if (rc) { 1617 netif_err(adapter, ifup, adapter->netdev, 1618 "Failed to create I/O TX queue num %d rc: %d\n", 1619 qid, rc); 1620 return rc; 1621 } 1622 1623 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1624 &tx_ring->ena_com_io_sq, 1625 &tx_ring->ena_com_io_cq); 1626 if (rc) { 1627 netif_err(adapter, ifup, adapter->netdev, 1628 "Failed to get TX queue handlers. TX queue num %d rc: %d\n", 1629 qid, rc); 1630 ena_com_destroy_io_queue(ena_dev, ena_qid); 1631 return rc; 1632 } 1633 1634 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); 1635 return rc; 1636 } 1637 1638 static int ena_create_all_io_tx_queues(struct ena_adapter *adapter) 1639 { 1640 struct ena_com_dev *ena_dev = adapter->ena_dev; 1641 int rc, i; 1642 1643 for (i = 0; i < adapter->num_queues; i++) { 1644 rc = ena_create_io_tx_queue(adapter, i); 1645 if (rc) 1646 goto create_err; 1647 } 1648 1649 return 0; 1650 1651 create_err: 1652 while (i--) 1653 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 1654 1655 return rc; 1656 } 1657 1658 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) 1659 { 1660 struct ena_com_dev *ena_dev; 1661 struct ena_com_create_io_ctx ctx = { 0 }; 1662 struct ena_ring *rx_ring; 1663 u32 msix_vector; 1664 u16 ena_qid; 1665 int rc; 1666 1667 ena_dev = adapter->ena_dev; 1668 1669 rx_ring = &adapter->rx_ring[qid]; 1670 msix_vector = ENA_IO_IRQ_IDX(qid); 1671 ena_qid = ENA_IO_RXQ_IDX(qid); 1672 1673 ctx.qid = ena_qid; 1674 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1675 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1676 ctx.msix_vector = msix_vector; 1677 ctx.queue_size = adapter->rx_ring_size; 1678 ctx.numa_node = cpu_to_node(rx_ring->cpu); 1679 1680 rc = ena_com_create_io_queue(ena_dev, &ctx); 1681 if (rc) { 1682 netif_err(adapter, ifup, adapter->netdev, 1683 "Failed to create I/O RX queue num %d rc: %d\n", 1684 qid, rc); 1685 return rc; 1686 } 1687 1688 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1689 &rx_ring->ena_com_io_sq, 1690 &rx_ring->ena_com_io_cq); 1691 if (rc) { 1692 netif_err(adapter, ifup, adapter->netdev, 1693 "Failed to get RX queue handlers. RX queue num %d rc: %d\n", 1694 qid, rc); 1695 ena_com_destroy_io_queue(ena_dev, ena_qid); 1696 return rc; 1697 } 1698 1699 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); 1700 1701 return rc; 1702 } 1703 1704 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) 1705 { 1706 struct ena_com_dev *ena_dev = adapter->ena_dev; 1707 int rc, i; 1708 1709 for (i = 0; i < adapter->num_queues; i++) { 1710 rc = ena_create_io_rx_queue(adapter, i); 1711 if (rc) 1712 goto create_err; 1713 } 1714 1715 return 0; 1716 1717 create_err: 1718 while (i--) 1719 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 1720 1721 return rc; 1722 } 1723 1724 static int ena_up(struct ena_adapter *adapter) 1725 { 1726 int rc, i; 1727 1728 netdev_dbg(adapter->netdev, "%s\n", __func__); 1729 1730 ena_setup_io_intr(adapter); 1731 1732 rc = ena_request_io_irq(adapter); 1733 if (rc) 1734 goto err_req_irq; 1735 1736 /* allocate transmit descriptors */ 1737 rc = ena_setup_all_tx_resources(adapter); 1738 if (rc) 1739 goto err_setup_tx; 1740 1741 /* allocate receive descriptors */ 1742 rc = ena_setup_all_rx_resources(adapter); 1743 if (rc) 1744 goto err_setup_rx; 1745 1746 /* Create TX queues */ 1747 rc = ena_create_all_io_tx_queues(adapter); 1748 if (rc) 1749 goto err_create_tx_queues; 1750 1751 /* Create RX queues */ 1752 rc = ena_create_all_io_rx_queues(adapter); 1753 if (rc) 1754 goto err_create_rx_queues; 1755 1756 rc = ena_up_complete(adapter); 1757 if (rc) 1758 goto err_up; 1759 1760 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) 1761 netif_carrier_on(adapter->netdev); 1762 1763 u64_stats_update_begin(&adapter->syncp); 1764 adapter->dev_stats.interface_up++; 1765 u64_stats_update_end(&adapter->syncp); 1766 1767 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); 1768 1769 /* Enable completion queues interrupt */ 1770 for (i = 0; i < adapter->num_queues; i++) 1771 ena_unmask_interrupt(&adapter->tx_ring[i], 1772 &adapter->rx_ring[i]); 1773 1774 /* schedule napi in case we had pending packets 1775 * from the last time we disable napi 1776 */ 1777 for (i = 0; i < adapter->num_queues; i++) 1778 napi_schedule(&adapter->ena_napi[i].napi); 1779 1780 return rc; 1781 1782 err_up: 1783 ena_destroy_all_rx_queues(adapter); 1784 err_create_rx_queues: 1785 ena_destroy_all_tx_queues(adapter); 1786 err_create_tx_queues: 1787 ena_free_all_io_rx_resources(adapter); 1788 err_setup_rx: 1789 ena_free_all_io_tx_resources(adapter); 1790 err_setup_tx: 1791 ena_free_io_irq(adapter); 1792 err_req_irq: 1793 1794 return rc; 1795 } 1796 1797 static void ena_down(struct ena_adapter *adapter) 1798 { 1799 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); 1800 1801 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); 1802 1803 u64_stats_update_begin(&adapter->syncp); 1804 adapter->dev_stats.interface_down++; 1805 u64_stats_update_end(&adapter->syncp); 1806 1807 netif_carrier_off(adapter->netdev); 1808 netif_tx_disable(adapter->netdev); 1809 1810 /* After this point the napi handler won't enable the tx queue */ 1811 ena_napi_disable_all(adapter); 1812 1813 /* After destroy the queue there won't be any new interrupts */ 1814 1815 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { 1816 int rc; 1817 1818 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 1819 if (rc) 1820 dev_err(&adapter->pdev->dev, "Device reset failed\n"); 1821 } 1822 1823 ena_destroy_all_io_queues(adapter); 1824 1825 ena_disable_io_intr_sync(adapter); 1826 ena_free_io_irq(adapter); 1827 ena_del_napi(adapter); 1828 1829 ena_free_all_tx_bufs(adapter); 1830 ena_free_all_rx_bufs(adapter); 1831 ena_free_all_io_tx_resources(adapter); 1832 ena_free_all_io_rx_resources(adapter); 1833 } 1834 1835 /* ena_open - Called when a network interface is made active 1836 * @netdev: network interface device structure 1837 * 1838 * Returns 0 on success, negative value on failure 1839 * 1840 * The open entry point is called when a network interface is made 1841 * active by the system (IFF_UP). At this point all resources needed 1842 * for transmit and receive operations are allocated, the interrupt 1843 * handler is registered with the OS, the watchdog timer is started, 1844 * and the stack is notified that the interface is ready. 1845 */ 1846 static int ena_open(struct net_device *netdev) 1847 { 1848 struct ena_adapter *adapter = netdev_priv(netdev); 1849 int rc; 1850 1851 /* Notify the stack of the actual queue counts. */ 1852 rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues); 1853 if (rc) { 1854 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); 1855 return rc; 1856 } 1857 1858 rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues); 1859 if (rc) { 1860 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); 1861 return rc; 1862 } 1863 1864 rc = ena_up(adapter); 1865 if (rc) 1866 return rc; 1867 1868 return rc; 1869 } 1870 1871 /* ena_close - Disables a network interface 1872 * @netdev: network interface device structure 1873 * 1874 * Returns 0, this is not allowed to fail 1875 * 1876 * The close entry point is called when an interface is de-activated 1877 * by the OS. The hardware is still under the drivers control, but 1878 * needs to be disabled. A global MAC reset is issued to stop the 1879 * hardware, and all transmit and receive resources are freed. 1880 */ 1881 static int ena_close(struct net_device *netdev) 1882 { 1883 struct ena_adapter *adapter = netdev_priv(netdev); 1884 1885 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); 1886 1887 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1888 ena_down(adapter); 1889 1890 /* Check for device status and issue reset if needed*/ 1891 check_for_admin_com_state(adapter); 1892 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 1893 netif_err(adapter, ifdown, adapter->netdev, 1894 "Destroy failure, restarting device\n"); 1895 ena_dump_stats_to_dmesg(adapter); 1896 /* rtnl lock already obtained in dev_ioctl() layer */ 1897 ena_destroy_device(adapter); 1898 ena_restore_device(adapter); 1899 } 1900 1901 return 0; 1902 } 1903 1904 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb) 1905 { 1906 u32 mss = skb_shinfo(skb)->gso_size; 1907 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 1908 u8 l4_protocol = 0; 1909 1910 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { 1911 ena_tx_ctx->l4_csum_enable = 1; 1912 if (mss) { 1913 ena_tx_ctx->tso_enable = 1; 1914 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; 1915 ena_tx_ctx->l4_csum_partial = 0; 1916 } else { 1917 ena_tx_ctx->tso_enable = 0; 1918 ena_meta->l4_hdr_len = 0; 1919 ena_tx_ctx->l4_csum_partial = 1; 1920 } 1921 1922 switch (ip_hdr(skb)->version) { 1923 case IPVERSION: 1924 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 1925 if (ip_hdr(skb)->frag_off & htons(IP_DF)) 1926 ena_tx_ctx->df = 1; 1927 if (mss) 1928 ena_tx_ctx->l3_csum_enable = 1; 1929 l4_protocol = ip_hdr(skb)->protocol; 1930 break; 1931 case 6: 1932 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 1933 l4_protocol = ipv6_hdr(skb)->nexthdr; 1934 break; 1935 default: 1936 break; 1937 } 1938 1939 if (l4_protocol == IPPROTO_TCP) 1940 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 1941 else 1942 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 1943 1944 ena_meta->mss = mss; 1945 ena_meta->l3_hdr_len = skb_network_header_len(skb); 1946 ena_meta->l3_hdr_offset = skb_network_offset(skb); 1947 ena_tx_ctx->meta_valid = 1; 1948 1949 } else { 1950 ena_tx_ctx->meta_valid = 0; 1951 } 1952 } 1953 1954 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, 1955 struct sk_buff *skb) 1956 { 1957 int num_frags, header_len, rc; 1958 1959 num_frags = skb_shinfo(skb)->nr_frags; 1960 header_len = skb_headlen(skb); 1961 1962 if (num_frags < tx_ring->sgl_size) 1963 return 0; 1964 1965 if ((num_frags == tx_ring->sgl_size) && 1966 (header_len < tx_ring->tx_max_header_size)) 1967 return 0; 1968 1969 u64_stats_update_begin(&tx_ring->syncp); 1970 tx_ring->tx_stats.linearize++; 1971 u64_stats_update_end(&tx_ring->syncp); 1972 1973 rc = skb_linearize(skb); 1974 if (unlikely(rc)) { 1975 u64_stats_update_begin(&tx_ring->syncp); 1976 tx_ring->tx_stats.linearize_failed++; 1977 u64_stats_update_end(&tx_ring->syncp); 1978 } 1979 1980 return rc; 1981 } 1982 1983 /* Called with netif_tx_lock. */ 1984 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) 1985 { 1986 struct ena_adapter *adapter = netdev_priv(dev); 1987 struct ena_tx_buffer *tx_info; 1988 struct ena_com_tx_ctx ena_tx_ctx; 1989 struct ena_ring *tx_ring; 1990 struct netdev_queue *txq; 1991 struct ena_com_buf *ena_buf; 1992 void *push_hdr; 1993 u32 len, last_frag; 1994 u16 next_to_use; 1995 u16 req_id; 1996 u16 push_len; 1997 u16 header_len; 1998 dma_addr_t dma; 1999 int qid, rc, nb_hw_desc; 2000 int i = -1; 2001 2002 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); 2003 /* Determine which tx ring we will be placed on */ 2004 qid = skb_get_queue_mapping(skb); 2005 tx_ring = &adapter->tx_ring[qid]; 2006 txq = netdev_get_tx_queue(dev, qid); 2007 2008 rc = ena_check_and_linearize_skb(tx_ring, skb); 2009 if (unlikely(rc)) 2010 goto error_drop_packet; 2011 2012 skb_tx_timestamp(skb); 2013 len = skb_headlen(skb); 2014 2015 next_to_use = tx_ring->next_to_use; 2016 req_id = tx_ring->free_tx_ids[next_to_use]; 2017 tx_info = &tx_ring->tx_buffer_info[req_id]; 2018 tx_info->num_of_bufs = 0; 2019 2020 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); 2021 ena_buf = tx_info->bufs; 2022 tx_info->skb = skb; 2023 2024 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2025 /* prepared the push buffer */ 2026 push_len = min_t(u32, len, tx_ring->tx_max_header_size); 2027 header_len = push_len; 2028 push_hdr = skb->data; 2029 } else { 2030 push_len = 0; 2031 header_len = min_t(u32, len, tx_ring->tx_max_header_size); 2032 push_hdr = NULL; 2033 } 2034 2035 netif_dbg(adapter, tx_queued, dev, 2036 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, 2037 push_hdr, push_len); 2038 2039 if (len > push_len) { 2040 dma = dma_map_single(tx_ring->dev, skb->data + push_len, 2041 len - push_len, DMA_TO_DEVICE); 2042 if (dma_mapping_error(tx_ring->dev, dma)) 2043 goto error_report_dma_error; 2044 2045 ena_buf->paddr = dma; 2046 ena_buf->len = len - push_len; 2047 2048 ena_buf++; 2049 tx_info->num_of_bufs++; 2050 } 2051 2052 last_frag = skb_shinfo(skb)->nr_frags; 2053 2054 for (i = 0; i < last_frag; i++) { 2055 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2056 2057 len = skb_frag_size(frag); 2058 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, 2059 DMA_TO_DEVICE); 2060 if (dma_mapping_error(tx_ring->dev, dma)) 2061 goto error_report_dma_error; 2062 2063 ena_buf->paddr = dma; 2064 ena_buf->len = len; 2065 ena_buf++; 2066 } 2067 2068 tx_info->num_of_bufs += last_frag; 2069 2070 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 2071 ena_tx_ctx.ena_bufs = tx_info->bufs; 2072 ena_tx_ctx.push_header = push_hdr; 2073 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2074 ena_tx_ctx.req_id = req_id; 2075 ena_tx_ctx.header_len = header_len; 2076 2077 /* set flags and meta data */ 2078 ena_tx_csum(&ena_tx_ctx, skb); 2079 2080 /* prepare the packet's descriptors to dma engine */ 2081 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2082 &nb_hw_desc); 2083 2084 if (unlikely(rc)) { 2085 netif_err(adapter, tx_queued, dev, 2086 "failed to prepare tx bufs\n"); 2087 u64_stats_update_begin(&tx_ring->syncp); 2088 tx_ring->tx_stats.queue_stop++; 2089 tx_ring->tx_stats.prepare_ctx_err++; 2090 u64_stats_update_end(&tx_ring->syncp); 2091 netif_tx_stop_queue(txq); 2092 goto error_unmap_dma; 2093 } 2094 2095 netdev_tx_sent_queue(txq, skb->len); 2096 2097 u64_stats_update_begin(&tx_ring->syncp); 2098 tx_ring->tx_stats.cnt++; 2099 tx_ring->tx_stats.bytes += skb->len; 2100 u64_stats_update_end(&tx_ring->syncp); 2101 2102 tx_info->tx_descs = nb_hw_desc; 2103 tx_info->last_jiffies = jiffies; 2104 tx_info->print_once = 0; 2105 2106 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2107 tx_ring->ring_size); 2108 2109 /* This WMB is aimed to: 2110 * 1 - perform smp barrier before reading next_to_completion 2111 * 2 - make sure the desc were written before trigger DB 2112 */ 2113 wmb(); 2114 2115 /* stop the queue when no more space available, the packet can have up 2116 * to sgl_size + 2. one for the meta descriptor and one for header 2117 * (if the header is larger than tx_max_header_size). 2118 */ 2119 if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < 2120 (tx_ring->sgl_size + 2))) { 2121 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", 2122 __func__, qid); 2123 2124 netif_tx_stop_queue(txq); 2125 u64_stats_update_begin(&tx_ring->syncp); 2126 tx_ring->tx_stats.queue_stop++; 2127 u64_stats_update_end(&tx_ring->syncp); 2128 2129 /* There is a rare condition where this function decide to 2130 * stop the queue but meanwhile clean_tx_irq updates 2131 * next_to_completion and terminates. 2132 * The queue will remain stopped forever. 2133 * To solve this issue this function perform rmb, check 2134 * the wakeup condition and wake up the queue if needed. 2135 */ 2136 smp_rmb(); 2137 2138 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) 2139 > ENA_TX_WAKEUP_THRESH) { 2140 netif_tx_wake_queue(txq); 2141 u64_stats_update_begin(&tx_ring->syncp); 2142 tx_ring->tx_stats.queue_wakeup++; 2143 u64_stats_update_end(&tx_ring->syncp); 2144 } 2145 } 2146 2147 if (netif_xmit_stopped(txq) || !skb->xmit_more) { 2148 /* trigger the dma engine */ 2149 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2150 u64_stats_update_begin(&tx_ring->syncp); 2151 tx_ring->tx_stats.doorbells++; 2152 u64_stats_update_end(&tx_ring->syncp); 2153 } 2154 2155 return NETDEV_TX_OK; 2156 2157 error_report_dma_error: 2158 u64_stats_update_begin(&tx_ring->syncp); 2159 tx_ring->tx_stats.dma_mapping_err++; 2160 u64_stats_update_end(&tx_ring->syncp); 2161 netdev_warn(adapter->netdev, "failed to map skb\n"); 2162 2163 tx_info->skb = NULL; 2164 2165 error_unmap_dma: 2166 if (i >= 0) { 2167 /* save value of frag that failed */ 2168 last_frag = i; 2169 2170 /* start back at beginning and unmap skb */ 2171 tx_info->skb = NULL; 2172 ena_buf = tx_info->bufs; 2173 dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), 2174 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); 2175 2176 /* unmap remaining mapped pages */ 2177 for (i = 0; i < last_frag; i++) { 2178 ena_buf++; 2179 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), 2180 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); 2181 } 2182 } 2183 2184 error_drop_packet: 2185 2186 dev_kfree_skb(skb); 2187 return NETDEV_TX_OK; 2188 } 2189 2190 #ifdef CONFIG_NET_POLL_CONTROLLER 2191 static void ena_netpoll(struct net_device *netdev) 2192 { 2193 struct ena_adapter *adapter = netdev_priv(netdev); 2194 int i; 2195 2196 /* Dont schedule NAPI if the driver is in the middle of reset 2197 * or netdev is down. 2198 */ 2199 2200 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) || 2201 test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 2202 return; 2203 2204 for (i = 0; i < adapter->num_queues; i++) 2205 napi_schedule(&adapter->ena_napi[i].napi); 2206 } 2207 #endif /* CONFIG_NET_POLL_CONTROLLER */ 2208 2209 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, 2210 void *accel_priv, select_queue_fallback_t fallback) 2211 { 2212 u16 qid; 2213 /* we suspect that this is good for in--kernel network services that 2214 * want to loop incoming skb rx to tx in normal user generated traffic, 2215 * most probably we will not get to this 2216 */ 2217 if (skb_rx_queue_recorded(skb)) 2218 qid = skb_get_rx_queue(skb); 2219 else 2220 qid = fallback(dev, skb); 2221 2222 return qid; 2223 } 2224 2225 static void ena_config_host_info(struct ena_com_dev *ena_dev) 2226 { 2227 struct ena_admin_host_info *host_info; 2228 int rc; 2229 2230 /* Allocate only the host info */ 2231 rc = ena_com_allocate_host_info(ena_dev); 2232 if (rc) { 2233 pr_err("Cannot allocate host info\n"); 2234 return; 2235 } 2236 2237 host_info = ena_dev->host_attr.host_info; 2238 2239 host_info->os_type = ENA_ADMIN_OS_LINUX; 2240 host_info->kernel_ver = LINUX_VERSION_CODE; 2241 strncpy(host_info->kernel_ver_str, utsname()->version, 2242 sizeof(host_info->kernel_ver_str) - 1); 2243 host_info->os_dist = 0; 2244 strncpy(host_info->os_dist_str, utsname()->release, 2245 sizeof(host_info->os_dist_str) - 1); 2246 host_info->driver_version = 2247 (DRV_MODULE_VER_MAJOR) | 2248 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 2249 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 2250 2251 rc = ena_com_set_host_attributes(ena_dev); 2252 if (rc) { 2253 if (rc == -EOPNOTSUPP) 2254 pr_warn("Cannot set host attributes\n"); 2255 else 2256 pr_err("Cannot set host attributes\n"); 2257 2258 goto err; 2259 } 2260 2261 return; 2262 2263 err: 2264 ena_com_delete_host_info(ena_dev); 2265 } 2266 2267 static void ena_config_debug_area(struct ena_adapter *adapter) 2268 { 2269 u32 debug_area_size; 2270 int rc, ss_count; 2271 2272 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); 2273 if (ss_count <= 0) { 2274 netif_err(adapter, drv, adapter->netdev, 2275 "SS count is negative\n"); 2276 return; 2277 } 2278 2279 /* allocate 32 bytes for each string and 64bit for the value */ 2280 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 2281 2282 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); 2283 if (rc) { 2284 pr_err("Cannot allocate debug area\n"); 2285 return; 2286 } 2287 2288 rc = ena_com_set_host_attributes(adapter->ena_dev); 2289 if (rc) { 2290 if (rc == -EOPNOTSUPP) 2291 netif_warn(adapter, drv, adapter->netdev, 2292 "Cannot set host attributes\n"); 2293 else 2294 netif_err(adapter, drv, adapter->netdev, 2295 "Cannot set host attributes\n"); 2296 goto err; 2297 } 2298 2299 return; 2300 err: 2301 ena_com_delete_debug_area(adapter->ena_dev); 2302 } 2303 2304 static void ena_get_stats64(struct net_device *netdev, 2305 struct rtnl_link_stats64 *stats) 2306 { 2307 struct ena_adapter *adapter = netdev_priv(netdev); 2308 struct ena_ring *rx_ring, *tx_ring; 2309 unsigned int start; 2310 u64 rx_drops; 2311 int i; 2312 2313 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2314 return; 2315 2316 for (i = 0; i < adapter->num_queues; i++) { 2317 u64 bytes, packets; 2318 2319 tx_ring = &adapter->tx_ring[i]; 2320 2321 do { 2322 start = u64_stats_fetch_begin_irq(&tx_ring->syncp); 2323 packets = tx_ring->tx_stats.cnt; 2324 bytes = tx_ring->tx_stats.bytes; 2325 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 2326 2327 stats->tx_packets += packets; 2328 stats->tx_bytes += bytes; 2329 2330 rx_ring = &adapter->rx_ring[i]; 2331 2332 do { 2333 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 2334 packets = rx_ring->rx_stats.cnt; 2335 bytes = rx_ring->rx_stats.bytes; 2336 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 2337 2338 stats->rx_packets += packets; 2339 stats->rx_bytes += bytes; 2340 } 2341 2342 do { 2343 start = u64_stats_fetch_begin_irq(&adapter->syncp); 2344 rx_drops = adapter->dev_stats.rx_drops; 2345 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); 2346 2347 stats->rx_dropped = rx_drops; 2348 2349 stats->multicast = 0; 2350 stats->collisions = 0; 2351 2352 stats->rx_length_errors = 0; 2353 stats->rx_crc_errors = 0; 2354 stats->rx_frame_errors = 0; 2355 stats->rx_fifo_errors = 0; 2356 stats->rx_missed_errors = 0; 2357 stats->tx_window_errors = 0; 2358 2359 stats->rx_errors = 0; 2360 stats->tx_errors = 0; 2361 } 2362 2363 static const struct net_device_ops ena_netdev_ops = { 2364 .ndo_open = ena_open, 2365 .ndo_stop = ena_close, 2366 .ndo_start_xmit = ena_start_xmit, 2367 .ndo_select_queue = ena_select_queue, 2368 .ndo_get_stats64 = ena_get_stats64, 2369 .ndo_tx_timeout = ena_tx_timeout, 2370 .ndo_change_mtu = ena_change_mtu, 2371 .ndo_set_mac_address = NULL, 2372 .ndo_validate_addr = eth_validate_addr, 2373 #ifdef CONFIG_NET_POLL_CONTROLLER 2374 .ndo_poll_controller = ena_netpoll, 2375 #endif /* CONFIG_NET_POLL_CONTROLLER */ 2376 }; 2377 2378 static int ena_device_validate_params(struct ena_adapter *adapter, 2379 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2380 { 2381 struct net_device *netdev = adapter->netdev; 2382 int rc; 2383 2384 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, 2385 adapter->mac_addr); 2386 if (!rc) { 2387 netif_err(adapter, drv, netdev, 2388 "Error, mac address are different\n"); 2389 return -EINVAL; 2390 } 2391 2392 if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) || 2393 (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) { 2394 netif_err(adapter, drv, netdev, 2395 "Error, device doesn't support enough queues\n"); 2396 return -EINVAL; 2397 } 2398 2399 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { 2400 netif_err(adapter, drv, netdev, 2401 "Error, device max mtu is smaller than netdev MTU\n"); 2402 return -EINVAL; 2403 } 2404 2405 return 0; 2406 } 2407 2408 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, 2409 struct ena_com_dev_get_features_ctx *get_feat_ctx, 2410 bool *wd_state) 2411 { 2412 struct device *dev = &pdev->dev; 2413 bool readless_supported; 2414 u32 aenq_groups; 2415 int dma_width; 2416 int rc; 2417 2418 rc = ena_com_mmio_reg_read_request_init(ena_dev); 2419 if (rc) { 2420 dev_err(dev, "failed to init mmio read less\n"); 2421 return rc; 2422 } 2423 2424 /* The PCIe configuration space revision id indicate if mmio reg 2425 * read is disabled 2426 */ 2427 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); 2428 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 2429 2430 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 2431 if (rc) { 2432 dev_err(dev, "Can not reset device\n"); 2433 goto err_mmio_read_less; 2434 } 2435 2436 rc = ena_com_validate_version(ena_dev); 2437 if (rc) { 2438 dev_err(dev, "device version is too low\n"); 2439 goto err_mmio_read_less; 2440 } 2441 2442 dma_width = ena_com_get_dma_width(ena_dev); 2443 if (dma_width < 0) { 2444 dev_err(dev, "Invalid dma width value %d", dma_width); 2445 rc = dma_width; 2446 goto err_mmio_read_less; 2447 } 2448 2449 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); 2450 if (rc) { 2451 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc); 2452 goto err_mmio_read_less; 2453 } 2454 2455 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); 2456 if (rc) { 2457 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n", 2458 rc); 2459 goto err_mmio_read_less; 2460 } 2461 2462 /* ENA admin level init */ 2463 rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); 2464 if (rc) { 2465 dev_err(dev, 2466 "Can not initialize ena admin queue with device\n"); 2467 goto err_mmio_read_less; 2468 } 2469 2470 /* To enable the msix interrupts the driver needs to know the number 2471 * of queues. So the driver uses polling mode to retrieve this 2472 * information 2473 */ 2474 ena_com_set_admin_polling_mode(ena_dev, true); 2475 2476 ena_config_host_info(ena_dev); 2477 2478 /* Get Device Attributes*/ 2479 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 2480 if (rc) { 2481 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); 2482 goto err_admin_init; 2483 } 2484 2485 /* Try to turn all the available aenq groups */ 2486 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 2487 BIT(ENA_ADMIN_FATAL_ERROR) | 2488 BIT(ENA_ADMIN_WARNING) | 2489 BIT(ENA_ADMIN_NOTIFICATION) | 2490 BIT(ENA_ADMIN_KEEP_ALIVE); 2491 2492 aenq_groups &= get_feat_ctx->aenq.supported_groups; 2493 2494 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 2495 if (rc) { 2496 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); 2497 goto err_admin_init; 2498 } 2499 2500 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 2501 2502 return 0; 2503 2504 err_admin_init: 2505 ena_com_delete_host_info(ena_dev); 2506 ena_com_admin_destroy(ena_dev); 2507 err_mmio_read_less: 2508 ena_com_mmio_reg_read_request_destroy(ena_dev); 2509 2510 return rc; 2511 } 2512 2513 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, 2514 int io_vectors) 2515 { 2516 struct ena_com_dev *ena_dev = adapter->ena_dev; 2517 struct device *dev = &adapter->pdev->dev; 2518 int rc; 2519 2520 rc = ena_enable_msix(adapter, io_vectors); 2521 if (rc) { 2522 dev_err(dev, "Can not reserve msix vectors\n"); 2523 return rc; 2524 } 2525 2526 ena_setup_mgmnt_intr(adapter); 2527 2528 rc = ena_request_mgmnt_irq(adapter); 2529 if (rc) { 2530 dev_err(dev, "Can not setup management interrupts\n"); 2531 goto err_disable_msix; 2532 } 2533 2534 ena_com_set_admin_polling_mode(ena_dev, false); 2535 2536 ena_com_admin_aenq_enable(ena_dev); 2537 2538 return 0; 2539 2540 err_disable_msix: 2541 ena_disable_msix(adapter); 2542 2543 return rc; 2544 } 2545 2546 static void ena_destroy_device(struct ena_adapter *adapter) 2547 { 2548 struct net_device *netdev = adapter->netdev; 2549 struct ena_com_dev *ena_dev = adapter->ena_dev; 2550 bool dev_up; 2551 2552 netif_carrier_off(netdev); 2553 2554 del_timer_sync(&adapter->timer_service); 2555 2556 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2557 adapter->dev_up_before_reset = dev_up; 2558 2559 ena_com_set_admin_running_state(ena_dev, false); 2560 2561 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2562 ena_down(adapter); 2563 2564 /* Before releasing the ENA resources, a device reset is required. 2565 * (to prevent the device from accessing them). 2566 * In case the reset flag is set and the device is up, ena_down() 2567 * already perform the reset, so it can be skipped. 2568 */ 2569 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2570 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 2571 2572 ena_free_mgmnt_irq(adapter); 2573 2574 ena_disable_msix(adapter); 2575 2576 ena_com_abort_admin_commands(ena_dev); 2577 2578 ena_com_wait_for_abort_completion(ena_dev); 2579 2580 ena_com_admin_destroy(ena_dev); 2581 2582 ena_com_mmio_reg_read_request_destroy(ena_dev); 2583 2584 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 2585 2586 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2587 } 2588 2589 static int ena_restore_device(struct ena_adapter *adapter) 2590 { 2591 struct ena_com_dev_get_features_ctx get_feat_ctx; 2592 struct ena_com_dev *ena_dev = adapter->ena_dev; 2593 struct pci_dev *pdev = adapter->pdev; 2594 bool wd_state; 2595 int rc; 2596 2597 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2598 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); 2599 if (rc) { 2600 dev_err(&pdev->dev, "Can not initialize device\n"); 2601 goto err; 2602 } 2603 adapter->wd_state = wd_state; 2604 2605 rc = ena_device_validate_params(adapter, &get_feat_ctx); 2606 if (rc) { 2607 dev_err(&pdev->dev, "Validation of device parameters failed\n"); 2608 goto err_device_destroy; 2609 } 2610 2611 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2612 /* Make sure we don't have a race with AENQ Links state handler */ 2613 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) 2614 netif_carrier_on(adapter->netdev); 2615 2616 rc = ena_enable_msix_and_set_admin_interrupts(adapter, 2617 adapter->num_queues); 2618 if (rc) { 2619 dev_err(&pdev->dev, "Enable MSI-X failed\n"); 2620 goto err_device_destroy; 2621 } 2622 /* If the interface was up before the reset bring it up */ 2623 if (adapter->dev_up_before_reset) { 2624 rc = ena_up(adapter); 2625 if (rc) { 2626 dev_err(&pdev->dev, "Failed to create I/O queues\n"); 2627 goto err_disable_msix; 2628 } 2629 } 2630 2631 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2632 dev_err(&pdev->dev, "Device reset completed successfully\n"); 2633 2634 return rc; 2635 err_disable_msix: 2636 ena_free_mgmnt_irq(adapter); 2637 ena_disable_msix(adapter); 2638 err_device_destroy: 2639 ena_com_admin_destroy(ena_dev); 2640 err: 2641 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2642 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2643 dev_err(&pdev->dev, 2644 "Reset attempt failed. Can not reset the device\n"); 2645 2646 return rc; 2647 } 2648 2649 static void ena_fw_reset_device(struct work_struct *work) 2650 { 2651 struct ena_adapter *adapter = 2652 container_of(work, struct ena_adapter, reset_task); 2653 struct pci_dev *pdev = adapter->pdev; 2654 2655 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 2656 dev_err(&pdev->dev, 2657 "device reset schedule while reset bit is off\n"); 2658 return; 2659 } 2660 rtnl_lock(); 2661 ena_destroy_device(adapter); 2662 ena_restore_device(adapter); 2663 rtnl_unlock(); 2664 } 2665 2666 static int check_missing_comp_in_queue(struct ena_adapter *adapter, 2667 struct ena_ring *tx_ring) 2668 { 2669 struct ena_tx_buffer *tx_buf; 2670 unsigned long last_jiffies; 2671 u32 missed_tx = 0; 2672 int i, rc = 0; 2673 2674 for (i = 0; i < tx_ring->ring_size; i++) { 2675 tx_buf = &tx_ring->tx_buffer_info[i]; 2676 last_jiffies = tx_buf->last_jiffies; 2677 if (unlikely(last_jiffies && 2678 time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) { 2679 if (!tx_buf->print_once) 2680 netif_notice(adapter, tx_err, adapter->netdev, 2681 "Found a Tx that wasn't completed on time, qid %d, index %d.\n", 2682 tx_ring->qid, i); 2683 2684 tx_buf->print_once = 1; 2685 missed_tx++; 2686 } 2687 } 2688 2689 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { 2690 netif_err(adapter, tx_err, adapter->netdev, 2691 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", 2692 missed_tx, 2693 adapter->missing_tx_completion_threshold); 2694 adapter->reset_reason = 2695 ENA_REGS_RESET_MISS_TX_CMPL; 2696 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2697 rc = -EIO; 2698 } 2699 2700 u64_stats_update_begin(&tx_ring->syncp); 2701 tx_ring->tx_stats.missed_tx = missed_tx; 2702 u64_stats_update_end(&tx_ring->syncp); 2703 2704 return rc; 2705 } 2706 2707 static void check_for_missing_tx_completions(struct ena_adapter *adapter) 2708 { 2709 struct ena_ring *tx_ring; 2710 int i, budget, rc; 2711 2712 /* Make sure the driver doesn't turn the device in other process */ 2713 smp_rmb(); 2714 2715 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2716 return; 2717 2718 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 2719 return; 2720 2721 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 2722 return; 2723 2724 budget = ENA_MONITORED_TX_QUEUES; 2725 2726 for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { 2727 tx_ring = &adapter->tx_ring[i]; 2728 2729 rc = check_missing_comp_in_queue(adapter, tx_ring); 2730 if (unlikely(rc)) 2731 return; 2732 2733 budget--; 2734 if (!budget) 2735 break; 2736 } 2737 2738 adapter->last_monitored_tx_qid = i % adapter->num_queues; 2739 } 2740 2741 /* trigger napi schedule after 2 consecutive detections */ 2742 #define EMPTY_RX_REFILL 2 2743 /* For the rare case where the device runs out of Rx descriptors and the 2744 * napi handler failed to refill new Rx descriptors (due to a lack of memory 2745 * for example). 2746 * This case will lead to a deadlock: 2747 * The device won't send interrupts since all the new Rx packets will be dropped 2748 * The napi handler won't allocate new Rx descriptors so the device will be 2749 * able to send new packets. 2750 * 2751 * This scenario can happen when the kernel's vm.min_free_kbytes is too small. 2752 * It is recommended to have at least 512MB, with a minimum of 128MB for 2753 * constrained environment). 2754 * 2755 * When such a situation is detected - Reschedule napi 2756 */ 2757 static void check_for_empty_rx_ring(struct ena_adapter *adapter) 2758 { 2759 struct ena_ring *rx_ring; 2760 int i, refill_required; 2761 2762 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2763 return; 2764 2765 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 2766 return; 2767 2768 for (i = 0; i < adapter->num_queues; i++) { 2769 rx_ring = &adapter->rx_ring[i]; 2770 2771 refill_required = 2772 ena_com_sq_empty_space(rx_ring->ena_com_io_sq); 2773 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 2774 rx_ring->empty_rx_queue++; 2775 2776 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 2777 u64_stats_update_begin(&rx_ring->syncp); 2778 rx_ring->rx_stats.empty_rx_ring++; 2779 u64_stats_update_end(&rx_ring->syncp); 2780 2781 netif_err(adapter, drv, adapter->netdev, 2782 "trigger refill for ring %d\n", i); 2783 2784 napi_schedule(rx_ring->napi); 2785 rx_ring->empty_rx_queue = 0; 2786 } 2787 } else { 2788 rx_ring->empty_rx_queue = 0; 2789 } 2790 } 2791 } 2792 2793 /* Check for keep alive expiration */ 2794 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 2795 { 2796 unsigned long keep_alive_expired; 2797 2798 if (!adapter->wd_state) 2799 return; 2800 2801 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2802 return; 2803 2804 keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies + 2805 adapter->keep_alive_timeout); 2806 if (unlikely(time_is_before_jiffies(keep_alive_expired))) { 2807 netif_err(adapter, drv, adapter->netdev, 2808 "Keep alive watchdog timeout.\n"); 2809 u64_stats_update_begin(&adapter->syncp); 2810 adapter->dev_stats.wd_expired++; 2811 u64_stats_update_end(&adapter->syncp); 2812 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 2813 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2814 } 2815 } 2816 2817 static void check_for_admin_com_state(struct ena_adapter *adapter) 2818 { 2819 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { 2820 netif_err(adapter, drv, adapter->netdev, 2821 "ENA admin queue is not in running state!\n"); 2822 u64_stats_update_begin(&adapter->syncp); 2823 adapter->dev_stats.admin_q_pause++; 2824 u64_stats_update_end(&adapter->syncp); 2825 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 2826 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2827 } 2828 } 2829 2830 static void ena_update_hints(struct ena_adapter *adapter, 2831 struct ena_admin_ena_hw_hints *hints) 2832 { 2833 struct net_device *netdev = adapter->netdev; 2834 2835 if (hints->admin_completion_tx_timeout) 2836 adapter->ena_dev->admin_queue.completion_timeout = 2837 hints->admin_completion_tx_timeout * 1000; 2838 2839 if (hints->mmio_read_timeout) 2840 /* convert to usec */ 2841 adapter->ena_dev->mmio_read.reg_read_to = 2842 hints->mmio_read_timeout * 1000; 2843 2844 if (hints->missed_tx_completion_count_threshold_to_reset) 2845 adapter->missing_tx_completion_threshold = 2846 hints->missed_tx_completion_count_threshold_to_reset; 2847 2848 if (hints->missing_tx_completion_timeout) { 2849 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2850 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; 2851 else 2852 adapter->missing_tx_completion_to = 2853 msecs_to_jiffies(hints->missing_tx_completion_timeout); 2854 } 2855 2856 if (hints->netdev_wd_timeout) 2857 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); 2858 2859 if (hints->driver_watchdog_timeout) { 2860 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2861 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2862 else 2863 adapter->keep_alive_timeout = 2864 msecs_to_jiffies(hints->driver_watchdog_timeout); 2865 } 2866 } 2867 2868 static void ena_update_host_info(struct ena_admin_host_info *host_info, 2869 struct net_device *netdev) 2870 { 2871 host_info->supported_network_features[0] = 2872 netdev->features & GENMASK_ULL(31, 0); 2873 host_info->supported_network_features[1] = 2874 (netdev->features & GENMASK_ULL(63, 32)) >> 32; 2875 } 2876 2877 static void ena_timer_service(struct timer_list *t) 2878 { 2879 struct ena_adapter *adapter = from_timer(adapter, t, timer_service); 2880 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; 2881 struct ena_admin_host_info *host_info = 2882 adapter->ena_dev->host_attr.host_info; 2883 2884 check_for_missing_keep_alive(adapter); 2885 2886 check_for_admin_com_state(adapter); 2887 2888 check_for_missing_tx_completions(adapter); 2889 2890 check_for_empty_rx_ring(adapter); 2891 2892 if (debug_area) 2893 ena_dump_stats_to_buf(adapter, debug_area); 2894 2895 if (host_info) 2896 ena_update_host_info(host_info, adapter->netdev); 2897 2898 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 2899 netif_err(adapter, drv, adapter->netdev, 2900 "Trigger reset is on\n"); 2901 ena_dump_stats_to_dmesg(adapter); 2902 queue_work(ena_wq, &adapter->reset_task); 2903 return; 2904 } 2905 2906 /* Reset the timer */ 2907 mod_timer(&adapter->timer_service, jiffies + HZ); 2908 } 2909 2910 static int ena_calc_io_queue_num(struct pci_dev *pdev, 2911 struct ena_com_dev *ena_dev, 2912 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2913 { 2914 int io_sq_num, io_queue_num; 2915 2916 /* In case of LLQ use the llq number in the get feature cmd */ 2917 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2918 io_sq_num = get_feat_ctx->max_queues.max_llq_num; 2919 2920 if (io_sq_num == 0) { 2921 dev_err(&pdev->dev, 2922 "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n"); 2923 2924 ena_dev->tx_mem_queue_type = 2925 ENA_ADMIN_PLACEMENT_POLICY_HOST; 2926 io_sq_num = get_feat_ctx->max_queues.max_sq_num; 2927 } 2928 } else { 2929 io_sq_num = get_feat_ctx->max_queues.max_sq_num; 2930 } 2931 2932 io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); 2933 io_queue_num = min_t(int, io_queue_num, io_sq_num); 2934 io_queue_num = min_t(int, io_queue_num, 2935 get_feat_ctx->max_queues.max_cq_num); 2936 /* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */ 2937 io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1); 2938 if (unlikely(!io_queue_num)) { 2939 dev_err(&pdev->dev, "The device doesn't have io queues\n"); 2940 return -EFAULT; 2941 } 2942 2943 return io_queue_num; 2944 } 2945 2946 static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, 2947 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2948 { 2949 bool has_mem_bar; 2950 2951 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); 2952 2953 /* Enable push mode if device supports LLQ */ 2954 if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) 2955 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; 2956 else 2957 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2958 } 2959 2960 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, 2961 struct net_device *netdev) 2962 { 2963 netdev_features_t dev_features = 0; 2964 2965 /* Set offload features */ 2966 if (feat->offload.tx & 2967 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 2968 dev_features |= NETIF_F_IP_CSUM; 2969 2970 if (feat->offload.tx & 2971 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) 2972 dev_features |= NETIF_F_IPV6_CSUM; 2973 2974 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 2975 dev_features |= NETIF_F_TSO; 2976 2977 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) 2978 dev_features |= NETIF_F_TSO6; 2979 2980 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) 2981 dev_features |= NETIF_F_TSO_ECN; 2982 2983 if (feat->offload.rx_supported & 2984 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 2985 dev_features |= NETIF_F_RXCSUM; 2986 2987 if (feat->offload.rx_supported & 2988 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) 2989 dev_features |= NETIF_F_RXCSUM; 2990 2991 netdev->features = 2992 dev_features | 2993 NETIF_F_SG | 2994 NETIF_F_RXHASH | 2995 NETIF_F_HIGHDMA; 2996 2997 netdev->hw_features |= netdev->features; 2998 netdev->vlan_features |= netdev->features; 2999 } 3000 3001 static void ena_set_conf_feat_params(struct ena_adapter *adapter, 3002 struct ena_com_dev_get_features_ctx *feat) 3003 { 3004 struct net_device *netdev = adapter->netdev; 3005 3006 /* Copy mac address */ 3007 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { 3008 eth_hw_addr_random(netdev); 3009 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); 3010 } else { 3011 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); 3012 ether_addr_copy(netdev->dev_addr, adapter->mac_addr); 3013 } 3014 3015 /* Set offload features */ 3016 ena_set_dev_offloads(feat, netdev); 3017 3018 adapter->max_mtu = feat->dev_attr.max_mtu; 3019 netdev->max_mtu = adapter->max_mtu; 3020 netdev->min_mtu = ENA_MIN_MTU; 3021 } 3022 3023 static int ena_rss_init_default(struct ena_adapter *adapter) 3024 { 3025 struct ena_com_dev *ena_dev = adapter->ena_dev; 3026 struct device *dev = &adapter->pdev->dev; 3027 int rc, i; 3028 u32 val; 3029 3030 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 3031 if (unlikely(rc)) { 3032 dev_err(dev, "Cannot init indirect table\n"); 3033 goto err_rss_init; 3034 } 3035 3036 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 3037 val = ethtool_rxfh_indir_default(i, adapter->num_queues); 3038 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 3039 ENA_IO_RXQ_IDX(val)); 3040 if (unlikely(rc && (rc != -EOPNOTSUPP))) { 3041 dev_err(dev, "Cannot fill indirect table\n"); 3042 goto err_fill_indir; 3043 } 3044 } 3045 3046 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 3047 ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 3048 if (unlikely(rc && (rc != -EOPNOTSUPP))) { 3049 dev_err(dev, "Cannot fill hash function\n"); 3050 goto err_fill_indir; 3051 } 3052 3053 rc = ena_com_set_default_hash_ctrl(ena_dev); 3054 if (unlikely(rc && (rc != -EOPNOTSUPP))) { 3055 dev_err(dev, "Cannot fill hash control\n"); 3056 goto err_fill_indir; 3057 } 3058 3059 return 0; 3060 3061 err_fill_indir: 3062 ena_com_rss_destroy(ena_dev); 3063 err_rss_init: 3064 3065 return rc; 3066 } 3067 3068 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) 3069 { 3070 int release_bars; 3071 3072 if (ena_dev->mem_bar) 3073 devm_iounmap(&pdev->dev, ena_dev->mem_bar); 3074 3075 if (ena_dev->reg_bar) 3076 devm_iounmap(&pdev->dev, ena_dev->reg_bar); 3077 3078 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3079 pci_release_selected_regions(pdev, release_bars); 3080 } 3081 3082 static int ena_calc_queue_size(struct pci_dev *pdev, 3083 struct ena_com_dev *ena_dev, 3084 u16 *max_tx_sgl_size, 3085 u16 *max_rx_sgl_size, 3086 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3087 { 3088 u32 queue_size = ENA_DEFAULT_RING_SIZE; 3089 3090 queue_size = min_t(u32, queue_size, 3091 get_feat_ctx->max_queues.max_cq_depth); 3092 queue_size = min_t(u32, queue_size, 3093 get_feat_ctx->max_queues.max_sq_depth); 3094 3095 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 3096 queue_size = min_t(u32, queue_size, 3097 get_feat_ctx->max_queues.max_llq_depth); 3098 3099 queue_size = rounddown_pow_of_two(queue_size); 3100 3101 if (unlikely(!queue_size)) { 3102 dev_err(&pdev->dev, "Invalid queue size\n"); 3103 return -EFAULT; 3104 } 3105 3106 *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, 3107 get_feat_ctx->max_queues.max_packet_tx_descs); 3108 *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, 3109 get_feat_ctx->max_queues.max_packet_rx_descs); 3110 3111 return queue_size; 3112 } 3113 3114 /* ena_probe - Device Initialization Routine 3115 * @pdev: PCI device information struct 3116 * @ent: entry in ena_pci_tbl 3117 * 3118 * Returns 0 on success, negative on failure 3119 * 3120 * ena_probe initializes an adapter identified by a pci_dev structure. 3121 * The OS initialization, configuring of the adapter private structure, 3122 * and a hardware reset occur. 3123 */ 3124 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3125 { 3126 struct ena_com_dev_get_features_ctx get_feat_ctx; 3127 static int version_printed; 3128 struct net_device *netdev; 3129 struct ena_adapter *adapter; 3130 struct ena_com_dev *ena_dev = NULL; 3131 static int adapters_found; 3132 int io_queue_num, bars, rc; 3133 int queue_size; 3134 u16 tx_sgl_size = 0; 3135 u16 rx_sgl_size = 0; 3136 bool wd_state; 3137 3138 dev_dbg(&pdev->dev, "%s\n", __func__); 3139 3140 if (version_printed++ == 0) 3141 dev_info(&pdev->dev, "%s", version); 3142 3143 rc = pci_enable_device_mem(pdev); 3144 if (rc) { 3145 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); 3146 return rc; 3147 } 3148 3149 pci_set_master(pdev); 3150 3151 ena_dev = vzalloc(sizeof(*ena_dev)); 3152 if (!ena_dev) { 3153 rc = -ENOMEM; 3154 goto err_disable_device; 3155 } 3156 3157 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3158 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); 3159 if (rc) { 3160 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", 3161 rc); 3162 goto err_free_ena_dev; 3163 } 3164 3165 ena_dev->reg_bar = devm_ioremap(&pdev->dev, 3166 pci_resource_start(pdev, ENA_REG_BAR), 3167 pci_resource_len(pdev, ENA_REG_BAR)); 3168 if (!ena_dev->reg_bar) { 3169 dev_err(&pdev->dev, "failed to remap regs bar\n"); 3170 rc = -EFAULT; 3171 goto err_free_region; 3172 } 3173 3174 ena_dev->dmadev = &pdev->dev; 3175 3176 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); 3177 if (rc) { 3178 dev_err(&pdev->dev, "ena device init failed\n"); 3179 if (rc == -ETIME) 3180 rc = -EPROBE_DEFER; 3181 goto err_free_region; 3182 } 3183 3184 ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); 3185 3186 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 3187 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, 3188 pci_resource_start(pdev, ENA_MEM_BAR), 3189 pci_resource_len(pdev, ENA_MEM_BAR)); 3190 if (!ena_dev->mem_bar) { 3191 rc = -EFAULT; 3192 goto err_device_destroy; 3193 } 3194 } 3195 3196 /* initial Tx interrupt delay, Assumes 1 usec granularity. 3197 * Updated during device initialization with the real granularity 3198 */ 3199 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; 3200 io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx); 3201 queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size, 3202 &rx_sgl_size, &get_feat_ctx); 3203 if ((queue_size <= 0) || (io_queue_num <= 0)) { 3204 rc = -EFAULT; 3205 goto err_device_destroy; 3206 } 3207 3208 dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n", 3209 io_queue_num, queue_size); 3210 3211 /* dev zeroed in init_etherdev */ 3212 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); 3213 if (!netdev) { 3214 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); 3215 rc = -ENOMEM; 3216 goto err_device_destroy; 3217 } 3218 3219 SET_NETDEV_DEV(netdev, &pdev->dev); 3220 3221 adapter = netdev_priv(netdev); 3222 pci_set_drvdata(pdev, adapter); 3223 3224 adapter->ena_dev = ena_dev; 3225 adapter->netdev = netdev; 3226 adapter->pdev = pdev; 3227 3228 ena_set_conf_feat_params(adapter, &get_feat_ctx); 3229 3230 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3231 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 3232 3233 adapter->tx_ring_size = queue_size; 3234 adapter->rx_ring_size = queue_size; 3235 3236 adapter->max_tx_sgl_size = tx_sgl_size; 3237 adapter->max_rx_sgl_size = rx_sgl_size; 3238 3239 adapter->num_queues = io_queue_num; 3240 adapter->last_monitored_tx_qid = 0; 3241 3242 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; 3243 adapter->wd_state = wd_state; 3244 3245 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); 3246 3247 rc = ena_com_init_interrupt_moderation(adapter->ena_dev); 3248 if (rc) { 3249 dev_err(&pdev->dev, 3250 "Failed to query interrupt moderation feature\n"); 3251 goto err_netdev_destroy; 3252 } 3253 ena_init_io_rings(adapter); 3254 3255 netdev->netdev_ops = &ena_netdev_ops; 3256 netdev->watchdog_timeo = TX_TIMEOUT; 3257 ena_set_ethtool_ops(netdev); 3258 3259 netdev->priv_flags |= IFF_UNICAST_FLT; 3260 3261 u64_stats_init(&adapter->syncp); 3262 3263 rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); 3264 if (rc) { 3265 dev_err(&pdev->dev, 3266 "Failed to enable and set the admin interrupts\n"); 3267 goto err_worker_destroy; 3268 } 3269 rc = ena_rss_init_default(adapter); 3270 if (rc && (rc != -EOPNOTSUPP)) { 3271 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); 3272 goto err_free_msix; 3273 } 3274 3275 ena_config_debug_area(adapter); 3276 3277 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); 3278 3279 netif_carrier_off(netdev); 3280 3281 rc = register_netdev(netdev); 3282 if (rc) { 3283 dev_err(&pdev->dev, "Cannot register net device\n"); 3284 goto err_rss; 3285 } 3286 3287 INIT_WORK(&adapter->reset_task, ena_fw_reset_device); 3288 3289 adapter->last_keep_alive_jiffies = jiffies; 3290 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 3291 adapter->missing_tx_completion_to = TX_TIMEOUT; 3292 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS; 3293 3294 ena_update_hints(adapter, &get_feat_ctx.hw_hints); 3295 3296 timer_setup(&adapter->timer_service, ena_timer_service, 0); 3297 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 3298 3299 dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", 3300 DEVICE_NAME, (long)pci_resource_start(pdev, 0), 3301 netdev->dev_addr, io_queue_num); 3302 3303 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 3304 3305 adapters_found++; 3306 3307 return 0; 3308 3309 err_rss: 3310 ena_com_delete_debug_area(ena_dev); 3311 ena_com_rss_destroy(ena_dev); 3312 err_free_msix: 3313 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); 3314 ena_free_mgmnt_irq(adapter); 3315 ena_disable_msix(adapter); 3316 err_worker_destroy: 3317 ena_com_destroy_interrupt_moderation(ena_dev); 3318 del_timer(&adapter->timer_service); 3319 err_netdev_destroy: 3320 free_netdev(netdev); 3321 err_device_destroy: 3322 ena_com_delete_host_info(ena_dev); 3323 ena_com_admin_destroy(ena_dev); 3324 err_free_region: 3325 ena_release_bars(ena_dev, pdev); 3326 err_free_ena_dev: 3327 vfree(ena_dev); 3328 err_disable_device: 3329 pci_disable_device(pdev); 3330 return rc; 3331 } 3332 3333 /*****************************************************************************/ 3334 static int ena_sriov_configure(struct pci_dev *dev, int numvfs) 3335 { 3336 int rc; 3337 3338 if (numvfs > 0) { 3339 rc = pci_enable_sriov(dev, numvfs); 3340 if (rc != 0) { 3341 dev_err(&dev->dev, 3342 "pci_enable_sriov failed to enable: %d vfs with the error: %d\n", 3343 numvfs, rc); 3344 return rc; 3345 } 3346 3347 return numvfs; 3348 } 3349 3350 if (numvfs == 0) { 3351 pci_disable_sriov(dev); 3352 return 0; 3353 } 3354 3355 return -EINVAL; 3356 } 3357 3358 /*****************************************************************************/ 3359 /*****************************************************************************/ 3360 3361 /* ena_remove - Device Removal Routine 3362 * @pdev: PCI device information struct 3363 * 3364 * ena_remove is called by the PCI subsystem to alert the driver 3365 * that it should release a PCI device. 3366 */ 3367 static void ena_remove(struct pci_dev *pdev) 3368 { 3369 struct ena_adapter *adapter = pci_get_drvdata(pdev); 3370 struct ena_com_dev *ena_dev; 3371 struct net_device *netdev; 3372 3373 ena_dev = adapter->ena_dev; 3374 netdev = adapter->netdev; 3375 3376 #ifdef CONFIG_RFS_ACCEL 3377 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { 3378 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 3379 netdev->rx_cpu_rmap = NULL; 3380 } 3381 #endif /* CONFIG_RFS_ACCEL */ 3382 3383 unregister_netdev(netdev); 3384 del_timer_sync(&adapter->timer_service); 3385 3386 cancel_work_sync(&adapter->reset_task); 3387 3388 /* Reset the device only if the device is running. */ 3389 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) 3390 ena_com_dev_reset(ena_dev, adapter->reset_reason); 3391 3392 ena_free_mgmnt_irq(adapter); 3393 3394 ena_disable_msix(adapter); 3395 3396 free_netdev(netdev); 3397 3398 ena_com_mmio_reg_read_request_destroy(ena_dev); 3399 3400 ena_com_abort_admin_commands(ena_dev); 3401 3402 ena_com_wait_for_abort_completion(ena_dev); 3403 3404 ena_com_admin_destroy(ena_dev); 3405 3406 ena_com_rss_destroy(ena_dev); 3407 3408 ena_com_delete_debug_area(ena_dev); 3409 3410 ena_com_delete_host_info(ena_dev); 3411 3412 ena_release_bars(ena_dev, pdev); 3413 3414 pci_disable_device(pdev); 3415 3416 ena_com_destroy_interrupt_moderation(ena_dev); 3417 3418 vfree(ena_dev); 3419 } 3420 3421 #ifdef CONFIG_PM 3422 /* ena_suspend - PM suspend callback 3423 * @pdev: PCI device information struct 3424 * @state:power state 3425 */ 3426 static int ena_suspend(struct pci_dev *pdev, pm_message_t state) 3427 { 3428 struct ena_adapter *adapter = pci_get_drvdata(pdev); 3429 3430 u64_stats_update_begin(&adapter->syncp); 3431 adapter->dev_stats.suspend++; 3432 u64_stats_update_end(&adapter->syncp); 3433 3434 rtnl_lock(); 3435 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 3436 dev_err(&pdev->dev, 3437 "ignoring device reset request as the device is being suspended\n"); 3438 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3439 } 3440 ena_destroy_device(adapter); 3441 rtnl_unlock(); 3442 return 0; 3443 } 3444 3445 /* ena_resume - PM resume callback 3446 * @pdev: PCI device information struct 3447 * 3448 */ 3449 static int ena_resume(struct pci_dev *pdev) 3450 { 3451 struct ena_adapter *adapter = pci_get_drvdata(pdev); 3452 int rc; 3453 3454 u64_stats_update_begin(&adapter->syncp); 3455 adapter->dev_stats.resume++; 3456 u64_stats_update_end(&adapter->syncp); 3457 3458 rtnl_lock(); 3459 rc = ena_restore_device(adapter); 3460 rtnl_unlock(); 3461 return rc; 3462 } 3463 #endif 3464 3465 static struct pci_driver ena_pci_driver = { 3466 .name = DRV_MODULE_NAME, 3467 .id_table = ena_pci_tbl, 3468 .probe = ena_probe, 3469 .remove = ena_remove, 3470 #ifdef CONFIG_PM 3471 .suspend = ena_suspend, 3472 .resume = ena_resume, 3473 #endif 3474 .sriov_configure = ena_sriov_configure, 3475 }; 3476 3477 static int __init ena_init(void) 3478 { 3479 pr_info("%s", version); 3480 3481 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); 3482 if (!ena_wq) { 3483 pr_err("Failed to create workqueue\n"); 3484 return -ENOMEM; 3485 } 3486 3487 return pci_register_driver(&ena_pci_driver); 3488 } 3489 3490 static void __exit ena_cleanup(void) 3491 { 3492 pci_unregister_driver(&ena_pci_driver); 3493 3494 if (ena_wq) { 3495 destroy_workqueue(ena_wq); 3496 ena_wq = NULL; 3497 } 3498 } 3499 3500 /****************************************************************************** 3501 ******************************** AENQ Handlers ******************************* 3502 *****************************************************************************/ 3503 /* ena_update_on_link_change: 3504 * Notify the network interface about the change in link status 3505 */ 3506 static void ena_update_on_link_change(void *adapter_data, 3507 struct ena_admin_aenq_entry *aenq_e) 3508 { 3509 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3510 struct ena_admin_aenq_link_change_desc *aenq_desc = 3511 (struct ena_admin_aenq_link_change_desc *)aenq_e; 3512 int status = aenq_desc->flags & 3513 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 3514 3515 if (status) { 3516 netdev_dbg(adapter->netdev, "%s\n", __func__); 3517 set_bit(ENA_FLAG_LINK_UP, &adapter->flags); 3518 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) 3519 netif_carrier_on(adapter->netdev); 3520 } else { 3521 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); 3522 netif_carrier_off(adapter->netdev); 3523 } 3524 } 3525 3526 static void ena_keep_alive_wd(void *adapter_data, 3527 struct ena_admin_aenq_entry *aenq_e) 3528 { 3529 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3530 struct ena_admin_aenq_keep_alive_desc *desc; 3531 u64 rx_drops; 3532 3533 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 3534 adapter->last_keep_alive_jiffies = jiffies; 3535 3536 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; 3537 3538 u64_stats_update_begin(&adapter->syncp); 3539 adapter->dev_stats.rx_drops = rx_drops; 3540 u64_stats_update_end(&adapter->syncp); 3541 } 3542 3543 static void ena_notification(void *adapter_data, 3544 struct ena_admin_aenq_entry *aenq_e) 3545 { 3546 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3547 struct ena_admin_ena_hw_hints *hints; 3548 3549 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, 3550 "Invalid group(%x) expected %x\n", 3551 aenq_e->aenq_common_desc.group, 3552 ENA_ADMIN_NOTIFICATION); 3553 3554 switch (aenq_e->aenq_common_desc.syndrom) { 3555 case ENA_ADMIN_UPDATE_HINTS: 3556 hints = (struct ena_admin_ena_hw_hints *) 3557 (&aenq_e->inline_data_w4); 3558 ena_update_hints(adapter, hints); 3559 break; 3560 default: 3561 netif_err(adapter, drv, adapter->netdev, 3562 "Invalid aenq notification link state %d\n", 3563 aenq_e->aenq_common_desc.syndrom); 3564 } 3565 } 3566 3567 /* This handler will called for unknown event group or unimplemented handlers*/ 3568 static void unimplemented_aenq_handler(void *data, 3569 struct ena_admin_aenq_entry *aenq_e) 3570 { 3571 struct ena_adapter *adapter = (struct ena_adapter *)data; 3572 3573 netif_err(adapter, drv, adapter->netdev, 3574 "Unknown event was received or event with unimplemented handler\n"); 3575 } 3576 3577 static struct ena_aenq_handlers aenq_handlers = { 3578 .handlers = { 3579 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 3580 [ENA_ADMIN_NOTIFICATION] = ena_notification, 3581 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, 3582 }, 3583 .unimplemented_handler = unimplemented_aenq_handler 3584 }; 3585 3586 module_init(ena_init); 3587 module_exit(ena_cleanup); 3588