// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)

#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index,
					    int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static void ena_increase_stat(u64 *statp, u64 cnt,
			      struct u64_stats_sync *syncp)
{
	u64_stats_update_begin(syncp);
	(*statp) += cnt;
	u64_stats_update_end(syncp);
}

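/* .ndo_tx_timeout handler. Note: this callback only records the reset reason
 * and flags a reset via ENA_FLAG_TRIGGER_RESET; the reset itself is presumed
 * to be carried out asynchronously by the driver's reset/service logic
 * elsewhere in this file (not shown in this section).
 */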
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not already in the middle of a reset trigger
	 */

	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors for the DMA engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "Failed to prepare tx bufs\n");
		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
				  &ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}

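/* Note on ena_xmit_common() above: it is shared by the XDP transmit path
 * below and (presumably) by the regular skb transmit path, ena_start_xmit(),
 * later in this file. As implemented above, only -ENOMEM (no room in the
 * submission queue) is treated as a recoverable error; any other
 * ena_com_prepare_tx() failure marks the device for reset.
 */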
/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from that of the Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}

static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
				struct ena_tx_buffer *tx_info,
				struct xdp_frame *xdpf,
				void **push_hdr,
				u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = xdpf;
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
			  &xdp_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

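/* Note on the mapping above: for LLQ, up to tx_max_header_size bytes of the
 * frame are pushed inline to the device (push_hdr/push_len) and only the
 * remainder, if any, is DMA-mapped as a separate buffer.
 */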
static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
			      struct net_device *dev,
			      struct xdp_frame *xdpf,
			      int flags)
{
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	u16 next_to_use, req_id;
	void *push_hdr;
	u32 push_len;
	int rc;

	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdpf->len);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a mb
	 */
	if (flags & XDP_XMIT_FLUSH) {
		ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
		ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
				  &xdp_ring->syncp);
	}

	return rc;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:
	xdp_return_frame(xdpf);
	return rc;
}

static int ena_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int qid, i, err, drops = 0;
	struct ena_ring *xdp_ring;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return -ENETDOWN;

	/* We assume that all rings have the same XDP program */
	if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
		return -ENXIO;

	qid = smp_processor_id() % adapter->xdp_num_queues;
	qid += adapter->xdp_first_ring;
	xdp_ring = &adapter->tx_ring[qid];

	/* Other CPU ids might try to send through this queue */
	spin_lock(&xdp_ring->xdp_tx_lock);

	for (i = 0; i < n; i++) {
		err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0);
		/* The descriptor is freed by ena_xdp_xmit_frame in case
		 * of an error.
		 */
		if (err)
			drops++;
	}

	/* Ring doorbell to make device aware of the packets */
	if (flags & XDP_XMIT_FLUSH) {
		ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
		ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
				  &xdp_ring->syncp);
	}

	spin_unlock(&xdp_ring->xdp_tx_lock);

	/* Return number of packets sent */
	return n - drops;
}

static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	u32 verdict = XDP_PASS;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;
	int qid;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			break;
		}

		/* Find xmit queue */
		qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
		xdp_ring = &rx_ring->adapter->tx_ring[qid];

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			break;
		}
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		break;
	default:
		bpf_warn_invalid_xdp_action(verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
out:
	rcu_read_unlock();

	return verdict;
}

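/* XDP TX rings are laid out in tx_ring[] immediately after the regular IO
 * rings: xdp_first_ring == num_io_queues, so an Rx queue's paired XDP ring
 * is found at qid + num_io_queues (see ena_xdp_execute() above).
 */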
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
ena_init_all_xdp_queues(adapter); 589 } else if (!old_bpf_prog) { 590 ena_down(adapter); 591 ena_init_all_xdp_queues(adapter); 592 } 593 ena_xdp_exchange_program(adapter, prog); 594 595 if (is_up && !old_bpf_prog) { 596 rc = ena_up(adapter); 597 if (rc) 598 return rc; 599 } 600 } else if (old_bpf_prog) { 601 rc = ena_destroy_and_free_all_xdp_queues(adapter); 602 if (rc) 603 return rc; 604 } 605 606 prev_mtu = netdev->max_mtu; 607 netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu; 608 609 if (!old_bpf_prog) 610 netif_info(adapter, drv, adapter->netdev, 611 "XDP program is set, changing the max_mtu from %d to %d", 612 prev_mtu, netdev->max_mtu); 613 614 } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) { 615 netif_err(adapter, drv, adapter->netdev, 616 "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on", 617 netdev->mtu, ENA_XDP_MAX_MTU); 618 NL_SET_ERR_MSG_MOD(bpf->extack, 619 "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info"); 620 return -EINVAL; 621 } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) { 622 netif_err(adapter, drv, adapter->netdev, 623 "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n", 624 adapter->num_io_queues, adapter->max_num_io_queues); 625 NL_SET_ERR_MSG_MOD(bpf->extack, 626 "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info"); 627 return -EINVAL; 628 } 629 630 return 0; 631 } 632 633 /* This is the main xdp callback, it's used by the kernel to set/unset the xdp 634 * program as well as to query the current xdp program id. 635 */ 636 static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf) 637 { 638 switch (bpf->command) { 639 case XDP_SETUP_PROG: 640 return ena_xdp_set(netdev, bpf); 641 default: 642 return -EINVAL; 643 } 644 return 0; 645 } 646 647 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) 648 { 649 #ifdef CONFIG_RFS_ACCEL 650 u32 i; 651 int rc; 652 653 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues); 654 if (!adapter->netdev->rx_cpu_rmap) 655 return -ENOMEM; 656 for (i = 0; i < adapter->num_io_queues; i++) { 657 int irq_idx = ENA_IO_IRQ_IDX(i); 658 659 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, 660 pci_irq_vector(adapter->pdev, irq_idx)); 661 if (rc) { 662 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); 663 adapter->netdev->rx_cpu_rmap = NULL; 664 return rc; 665 } 666 } 667 #endif /* CONFIG_RFS_ACCEL */ 668 return 0; 669 } 670 671 static void ena_init_io_rings_common(struct ena_adapter *adapter, 672 struct ena_ring *ring, u16 qid) 673 { 674 ring->qid = qid; 675 ring->pdev = adapter->pdev; 676 ring->dev = &adapter->pdev->dev; 677 ring->netdev = adapter->netdev; 678 ring->napi = &adapter->ena_napi[qid].napi; 679 ring->adapter = adapter; 680 ring->ena_dev = adapter->ena_dev; 681 ring->per_napi_packets = 0; 682 ring->cpu = 0; 683 ring->first_interrupt = false; 684 ring->no_interrupt_event_cnt = 0; 685 u64_stats_init(&ring->syncp); 686 } 687 688 static void ena_init_io_rings(struct ena_adapter *adapter, 689 int first_index, int count) 690 { 691 struct ena_com_dev *ena_dev; 692 struct ena_ring *txr, *rxr; 693 int i; 694 695 ena_dev = adapter->ena_dev; 696 697 for (i = first_index; i < first_index + count; i++) { 698 txr = &adapter->tx_ring[i]; 699 rxr = &adapter->rx_ring[i]; 700 701 /* TX common ring 
state */ 702 ena_init_io_rings_common(adapter, txr, i); 703 704 /* TX specific ring state */ 705 txr->ring_size = adapter->requested_tx_ring_size; 706 txr->tx_max_header_size = ena_dev->tx_max_header_size; 707 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; 708 txr->sgl_size = adapter->max_tx_sgl_size; 709 txr->smoothed_interval = 710 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); 711 txr->disable_meta_caching = adapter->disable_meta_caching; 712 spin_lock_init(&txr->xdp_tx_lock); 713 714 /* Don't init RX queues for xdp queues */ 715 if (!ENA_IS_XDP_INDEX(adapter, i)) { 716 /* RX common ring state */ 717 ena_init_io_rings_common(adapter, rxr, i); 718 719 /* RX specific ring state */ 720 rxr->ring_size = adapter->requested_rx_ring_size; 721 rxr->rx_copybreak = adapter->rx_copybreak; 722 rxr->sgl_size = adapter->max_rx_sgl_size; 723 rxr->smoothed_interval = 724 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); 725 rxr->empty_rx_queue = 0; 726 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 727 } 728 } 729 } 730 731 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors) 732 * @adapter: network interface device structure 733 * @qid: queue index 734 * 735 * Return 0 on success, negative on failure 736 */ 737 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) 738 { 739 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 740 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; 741 int size, i, node; 742 743 if (tx_ring->tx_buffer_info) { 744 netif_err(adapter, ifup, 745 adapter->netdev, "tx_buffer_info info is not NULL"); 746 return -EEXIST; 747 } 748 749 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; 750 node = cpu_to_node(ena_irq->cpu); 751 752 tx_ring->tx_buffer_info = vzalloc_node(size, node); 753 if (!tx_ring->tx_buffer_info) { 754 tx_ring->tx_buffer_info = vzalloc(size); 755 if (!tx_ring->tx_buffer_info) 756 goto err_tx_buffer_info; 757 } 758 759 size = sizeof(u16) * tx_ring->ring_size; 760 tx_ring->free_ids = vzalloc_node(size, node); 761 if (!tx_ring->free_ids) { 762 tx_ring->free_ids = vzalloc(size); 763 if (!tx_ring->free_ids) 764 goto err_tx_free_ids; 765 } 766 767 size = tx_ring->tx_max_header_size; 768 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); 769 if (!tx_ring->push_buf_intermediate_buf) { 770 tx_ring->push_buf_intermediate_buf = vzalloc(size); 771 if (!tx_ring->push_buf_intermediate_buf) 772 goto err_push_buf_intermediate_buf; 773 } 774 775 /* Req id ring for TX out of order completions */ 776 for (i = 0; i < tx_ring->ring_size; i++) 777 tx_ring->free_ids[i] = i; 778 779 /* Reset tx statistics */ 780 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); 781 782 tx_ring->next_to_use = 0; 783 tx_ring->next_to_clean = 0; 784 tx_ring->cpu = ena_irq->cpu; 785 return 0; 786 787 err_push_buf_intermediate_buf: 788 vfree(tx_ring->free_ids); 789 tx_ring->free_ids = NULL; 790 err_tx_free_ids: 791 vfree(tx_ring->tx_buffer_info); 792 tx_ring->tx_buffer_info = NULL; 793 err_tx_buffer_info: 794 return -ENOMEM; 795 } 796 797 /* ena_free_tx_resources - Free I/O Tx Resources per Queue 798 * @adapter: network interface device structure 799 * @qid: queue index 800 * 801 * Free all transmit software resources 802 */ 803 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) 804 { 805 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 806 807 vfree(tx_ring->tx_buffer_info); 808 tx_ring->tx_buffer_info = NULL; 809 810 vfree(tx_ring->free_ids); 811 tx_ring->free_ids 
= NULL; 812 813 vfree(tx_ring->push_buf_intermediate_buf); 814 tx_ring->push_buf_intermediate_buf = NULL; 815 } 816 817 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter, 818 int first_index, 819 int count) 820 { 821 int i, rc = 0; 822 823 for (i = first_index; i < first_index + count; i++) { 824 rc = ena_setup_tx_resources(adapter, i); 825 if (rc) 826 goto err_setup_tx; 827 } 828 829 return 0; 830 831 err_setup_tx: 832 833 netif_err(adapter, ifup, adapter->netdev, 834 "Tx queue %d: allocation failed\n", i); 835 836 /* rewind the index freeing the rings as we go */ 837 while (first_index < i--) 838 ena_free_tx_resources(adapter, i); 839 return rc; 840 } 841 842 static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, 843 int first_index, int count) 844 { 845 int i; 846 847 for (i = first_index; i < first_index + count; i++) 848 ena_free_tx_resources(adapter, i); 849 } 850 851 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues 852 * @adapter: board private structure 853 * 854 * Free all transmit software resources 855 */ 856 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter) 857 { 858 ena_free_all_io_tx_resources_in_range(adapter, 859 0, 860 adapter->xdp_num_queues + 861 adapter->num_io_queues); 862 } 863 864 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors) 865 * @adapter: network interface device structure 866 * @qid: queue index 867 * 868 * Returns 0 on success, negative on failure 869 */ 870 static int ena_setup_rx_resources(struct ena_adapter *adapter, 871 u32 qid) 872 { 873 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 874 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; 875 int size, node, i; 876 877 if (rx_ring->rx_buffer_info) { 878 netif_err(adapter, ifup, adapter->netdev, 879 "rx_buffer_info is not NULL"); 880 return -EEXIST; 881 } 882 883 /* alloc extra element so in rx path 884 * we can always prefetch rx_info + 1 885 */ 886 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); 887 node = cpu_to_node(ena_irq->cpu); 888 889 rx_ring->rx_buffer_info = vzalloc_node(size, node); 890 if (!rx_ring->rx_buffer_info) { 891 rx_ring->rx_buffer_info = vzalloc(size); 892 if (!rx_ring->rx_buffer_info) 893 return -ENOMEM; 894 } 895 896 size = sizeof(u16) * rx_ring->ring_size; 897 rx_ring->free_ids = vzalloc_node(size, node); 898 if (!rx_ring->free_ids) { 899 rx_ring->free_ids = vzalloc(size); 900 if (!rx_ring->free_ids) { 901 vfree(rx_ring->rx_buffer_info); 902 rx_ring->rx_buffer_info = NULL; 903 return -ENOMEM; 904 } 905 } 906 907 /* Req id ring for receiving RX pkts out of order */ 908 for (i = 0; i < rx_ring->ring_size; i++) 909 rx_ring->free_ids[i] = i; 910 911 /* Reset rx statistics */ 912 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); 913 914 rx_ring->next_to_clean = 0; 915 rx_ring->next_to_use = 0; 916 rx_ring->cpu = ena_irq->cpu; 917 918 return 0; 919 } 920 921 /* ena_free_rx_resources - Free I/O Rx Resources 922 * @adapter: network interface device structure 923 * @qid: queue index 924 * 925 * Free all receive software resources 926 */ 927 static void ena_free_rx_resources(struct ena_adapter *adapter, 928 u32 qid) 929 { 930 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 931 932 vfree(rx_ring->rx_buffer_info); 933 rx_ring->rx_buffer_info = NULL; 934 935 vfree(rx_ring->free_ids); 936 rx_ring->free_ids = NULL; 937 } 938 939 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues 940 * @adapter: board private structure 
941 * 942 * Return 0 on success, negative on failure 943 */ 944 static int ena_setup_all_rx_resources(struct ena_adapter *adapter) 945 { 946 int i, rc = 0; 947 948 for (i = 0; i < adapter->num_io_queues; i++) { 949 rc = ena_setup_rx_resources(adapter, i); 950 if (rc) 951 goto err_setup_rx; 952 } 953 954 return 0; 955 956 err_setup_rx: 957 958 netif_err(adapter, ifup, adapter->netdev, 959 "Rx queue %d: allocation failed\n", i); 960 961 /* rewind the index freeing the rings as we go */ 962 while (i--) 963 ena_free_rx_resources(adapter, i); 964 return rc; 965 } 966 967 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues 968 * @adapter: board private structure 969 * 970 * Free all receive software resources 971 */ 972 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter) 973 { 974 int i; 975 976 for (i = 0; i < adapter->num_io_queues; i++) 977 ena_free_rx_resources(adapter, i); 978 } 979 980 static int ena_alloc_rx_page(struct ena_ring *rx_ring, 981 struct ena_rx_buffer *rx_info, gfp_t gfp) 982 { 983 int headroom = rx_ring->rx_headroom; 984 struct ena_com_buf *ena_buf; 985 struct page *page; 986 dma_addr_t dma; 987 988 /* restore page offset value in case it has been changed by device */ 989 rx_info->page_offset = headroom; 990 991 /* if previous allocated page is not used */ 992 if (unlikely(rx_info->page)) 993 return 0; 994 995 page = alloc_page(gfp); 996 if (unlikely(!page)) { 997 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, 998 &rx_ring->syncp); 999 return -ENOMEM; 1000 } 1001 1002 /* To enable NIC-side port-mirroring, AKA SPAN port, 1003 * we make the buffer readable from the nic as well 1004 */ 1005 dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, 1006 DMA_BIDIRECTIONAL); 1007 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { 1008 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, 1009 &rx_ring->syncp); 1010 1011 __free_page(page); 1012 return -EIO; 1013 } 1014 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1015 "Allocate page %p, rx_info %p\n", page, rx_info); 1016 1017 rx_info->page = page; 1018 ena_buf = &rx_info->ena_buf; 1019 ena_buf->paddr = dma + headroom; 1020 ena_buf->len = ENA_PAGE_SIZE - headroom; 1021 1022 return 0; 1023 } 1024 1025 static void ena_unmap_rx_buff(struct ena_ring *rx_ring, 1026 struct ena_rx_buffer *rx_info) 1027 { 1028 struct ena_com_buf *ena_buf = &rx_info->ena_buf; 1029 1030 dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, 1031 ENA_PAGE_SIZE, 1032 DMA_BIDIRECTIONAL); 1033 } 1034 1035 static void ena_free_rx_page(struct ena_ring *rx_ring, 1036 struct ena_rx_buffer *rx_info) 1037 { 1038 struct page *page = rx_info->page; 1039 1040 if (unlikely(!page)) { 1041 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, 1042 "Trying to free unallocated buffer\n"); 1043 return; 1044 } 1045 1046 ena_unmap_rx_buff(rx_ring, rx_info); 1047 1048 __free_page(page); 1049 rx_info->page = NULL; 1050 } 1051 1052 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) 1053 { 1054 u16 next_to_use, req_id; 1055 u32 i; 1056 int rc; 1057 1058 next_to_use = rx_ring->next_to_use; 1059 1060 for (i = 0; i < num; i++) { 1061 struct ena_rx_buffer *rx_info; 1062 1063 req_id = rx_ring->free_ids[next_to_use]; 1064 1065 rx_info = &rx_ring->rx_buffer_info[req_id]; 1066 1067 rc = ena_alloc_rx_page(rx_ring, rx_info, 1068 GFP_ATOMIC | __GFP_COMP); 1069 if (unlikely(rc < 0)) { 1070 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, 1071 "Failed to allocate buffer for rx queue %d\n", 1072 
rx_ring->qid); 1073 break; 1074 } 1075 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 1076 &rx_info->ena_buf, 1077 req_id); 1078 if (unlikely(rc)) { 1079 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, 1080 "Failed to add buffer for rx queue %d\n", 1081 rx_ring->qid); 1082 break; 1083 } 1084 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 1085 rx_ring->ring_size); 1086 } 1087 1088 if (unlikely(i < num)) { 1089 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, 1090 &rx_ring->syncp); 1091 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, 1092 "Refilled rx qid %d with only %d buffers (from %d)\n", 1093 rx_ring->qid, i, num); 1094 } 1095 1096 /* ena_com_write_sq_doorbell issues a wmb() */ 1097 if (likely(i)) 1098 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 1099 1100 rx_ring->next_to_use = next_to_use; 1101 1102 return i; 1103 } 1104 1105 static void ena_free_rx_bufs(struct ena_adapter *adapter, 1106 u32 qid) 1107 { 1108 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 1109 u32 i; 1110 1111 for (i = 0; i < rx_ring->ring_size; i++) { 1112 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 1113 1114 if (rx_info->page) 1115 ena_free_rx_page(rx_ring, rx_info); 1116 } 1117 } 1118 1119 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers 1120 * @adapter: board private structure 1121 */ 1122 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter) 1123 { 1124 struct ena_ring *rx_ring; 1125 int i, rc, bufs_num; 1126 1127 for (i = 0; i < adapter->num_io_queues; i++) { 1128 rx_ring = &adapter->rx_ring[i]; 1129 bufs_num = rx_ring->ring_size - 1; 1130 rc = ena_refill_rx_bufs(rx_ring, bufs_num); 1131 1132 if (unlikely(rc != bufs_num)) 1133 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, 1134 "Refilling Queue %d failed. 
allocated %d buffers from: %d\n", 1135 i, rc, bufs_num); 1136 } 1137 } 1138 1139 static void ena_free_all_rx_bufs(struct ena_adapter *adapter) 1140 { 1141 int i; 1142 1143 for (i = 0; i < adapter->num_io_queues; i++) 1144 ena_free_rx_bufs(adapter, i); 1145 } 1146 1147 static void ena_unmap_tx_buff(struct ena_ring *tx_ring, 1148 struct ena_tx_buffer *tx_info) 1149 { 1150 struct ena_com_buf *ena_buf; 1151 u32 cnt; 1152 int i; 1153 1154 ena_buf = tx_info->bufs; 1155 cnt = tx_info->num_of_bufs; 1156 1157 if (unlikely(!cnt)) 1158 return; 1159 1160 if (tx_info->map_linear_data) { 1161 dma_unmap_single(tx_ring->dev, 1162 dma_unmap_addr(ena_buf, paddr), 1163 dma_unmap_len(ena_buf, len), 1164 DMA_TO_DEVICE); 1165 ena_buf++; 1166 cnt--; 1167 } 1168 1169 /* unmap remaining mapped pages */ 1170 for (i = 0; i < cnt; i++) { 1171 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), 1172 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); 1173 ena_buf++; 1174 } 1175 } 1176 1177 /* ena_free_tx_bufs - Free Tx Buffers per Queue 1178 * @tx_ring: TX ring for which buffers be freed 1179 */ 1180 static void ena_free_tx_bufs(struct ena_ring *tx_ring) 1181 { 1182 bool print_once = true; 1183 u32 i; 1184 1185 for (i = 0; i < tx_ring->ring_size; i++) { 1186 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 1187 1188 if (!tx_info->skb) 1189 continue; 1190 1191 if (print_once) { 1192 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev, 1193 "Free uncompleted tx skb qid %d idx 0x%x\n", 1194 tx_ring->qid, i); 1195 print_once = false; 1196 } else { 1197 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev, 1198 "Free uncompleted tx skb qid %d idx 0x%x\n", 1199 tx_ring->qid, i); 1200 } 1201 1202 ena_unmap_tx_buff(tx_ring, tx_info); 1203 1204 dev_kfree_skb_any(tx_info->skb); 1205 } 1206 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, 1207 tx_ring->qid)); 1208 } 1209 1210 static void ena_free_all_tx_bufs(struct ena_adapter *adapter) 1211 { 1212 struct ena_ring *tx_ring; 1213 int i; 1214 1215 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { 1216 tx_ring = &adapter->tx_ring[i]; 1217 ena_free_tx_bufs(tx_ring); 1218 } 1219 } 1220 1221 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) 1222 { 1223 u16 ena_qid; 1224 int i; 1225 1226 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { 1227 ena_qid = ENA_IO_TXQ_IDX(i); 1228 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 1229 } 1230 } 1231 1232 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) 1233 { 1234 u16 ena_qid; 1235 int i; 1236 1237 for (i = 0; i < adapter->num_io_queues; i++) { 1238 ena_qid = ENA_IO_RXQ_IDX(i); 1239 cancel_work_sync(&adapter->ena_napi[i].dim.work); 1240 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 1241 } 1242 } 1243 1244 static void ena_destroy_all_io_queues(struct ena_adapter *adapter) 1245 { 1246 ena_destroy_all_tx_queues(adapter); 1247 ena_destroy_all_rx_queues(adapter); 1248 } 1249 1250 static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, 1251 struct ena_tx_buffer *tx_info, bool is_xdp) 1252 { 1253 if (tx_info) 1254 netif_err(ring->adapter, 1255 tx_done, 1256 ring->netdev, 1257 "tx_info doesn't have valid %s", 1258 is_xdp ? 
"xdp frame" : "skb"); 1259 else 1260 netif_err(ring->adapter, 1261 tx_done, 1262 ring->netdev, 1263 "Invalid req_id: %hu\n", 1264 req_id); 1265 1266 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); 1267 1268 /* Trigger device reset */ 1269 ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 1270 set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags); 1271 return -EFAULT; 1272 } 1273 1274 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 1275 { 1276 struct ena_tx_buffer *tx_info = NULL; 1277 1278 if (likely(req_id < tx_ring->ring_size)) { 1279 tx_info = &tx_ring->tx_buffer_info[req_id]; 1280 if (likely(tx_info->skb)) 1281 return 0; 1282 } 1283 1284 return handle_invalid_req_id(tx_ring, req_id, tx_info, false); 1285 } 1286 1287 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id) 1288 { 1289 struct ena_tx_buffer *tx_info = NULL; 1290 1291 if (likely(req_id < xdp_ring->ring_size)) { 1292 tx_info = &xdp_ring->tx_buffer_info[req_id]; 1293 if (likely(tx_info->xdpf)) 1294 return 0; 1295 } 1296 1297 return handle_invalid_req_id(xdp_ring, req_id, tx_info, true); 1298 } 1299 1300 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) 1301 { 1302 struct netdev_queue *txq; 1303 bool above_thresh; 1304 u32 tx_bytes = 0; 1305 u32 total_done = 0; 1306 u16 next_to_clean; 1307 u16 req_id; 1308 int tx_pkts = 0; 1309 int rc; 1310 1311 next_to_clean = tx_ring->next_to_clean; 1312 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); 1313 1314 while (tx_pkts < budget) { 1315 struct ena_tx_buffer *tx_info; 1316 struct sk_buff *skb; 1317 1318 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, 1319 &req_id); 1320 if (rc) 1321 break; 1322 1323 rc = validate_tx_req_id(tx_ring, req_id); 1324 if (rc) 1325 break; 1326 1327 tx_info = &tx_ring->tx_buffer_info[req_id]; 1328 skb = tx_info->skb; 1329 1330 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ 1331 prefetch(&skb->end); 1332 1333 tx_info->skb = NULL; 1334 tx_info->last_jiffies = 0; 1335 1336 ena_unmap_tx_buff(tx_ring, tx_info); 1337 1338 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, 1339 "tx_poll: q %d skb %p completed\n", tx_ring->qid, 1340 skb); 1341 1342 tx_bytes += skb->len; 1343 dev_kfree_skb(skb); 1344 tx_pkts++; 1345 total_done += tx_info->tx_descs; 1346 1347 tx_ring->free_ids[next_to_clean] = req_id; 1348 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, 1349 tx_ring->ring_size); 1350 } 1351 1352 tx_ring->next_to_clean = next_to_clean; 1353 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); 1354 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 1355 1356 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); 1357 1358 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, 1359 "tx_poll: q %d done. total pkts: %d\n", 1360 tx_ring->qid, tx_pkts); 1361 1362 /* need to make the rings circular update visible to 1363 * ena_start_xmit() before checking for netif_queue_stopped(). 
1364 */ 1365 smp_mb(); 1366 1367 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 1368 ENA_TX_WAKEUP_THRESH); 1369 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { 1370 __netif_tx_lock(txq, smp_processor_id()); 1371 above_thresh = 1372 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 1373 ENA_TX_WAKEUP_THRESH); 1374 if (netif_tx_queue_stopped(txq) && above_thresh && 1375 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { 1376 netif_tx_wake_queue(txq); 1377 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, 1378 &tx_ring->syncp); 1379 } 1380 __netif_tx_unlock(txq); 1381 } 1382 1383 return tx_pkts; 1384 } 1385 1386 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags) 1387 { 1388 struct sk_buff *skb; 1389 1390 if (frags) 1391 skb = napi_get_frags(rx_ring->napi); 1392 else 1393 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 1394 rx_ring->rx_copybreak); 1395 1396 if (unlikely(!skb)) { 1397 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, 1398 &rx_ring->syncp); 1399 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, 1400 "Failed to allocate skb. frags: %d\n", frags); 1401 return NULL; 1402 } 1403 1404 return skb; 1405 } 1406 1407 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, 1408 struct ena_com_rx_buf_info *ena_bufs, 1409 u32 descs, 1410 u16 *next_to_clean) 1411 { 1412 struct sk_buff *skb; 1413 struct ena_rx_buffer *rx_info; 1414 u16 len, req_id, buf = 0; 1415 void *va; 1416 1417 len = ena_bufs[buf].len; 1418 req_id = ena_bufs[buf].req_id; 1419 1420 rx_info = &rx_ring->rx_buffer_info[req_id]; 1421 1422 if (unlikely(!rx_info->page)) { 1423 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 1424 "Page is NULL\n"); 1425 return NULL; 1426 } 1427 1428 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1429 "rx_info %p page %p\n", 1430 rx_info, rx_info->page); 1431 1432 /* save virt address of first buffer */ 1433 va = page_address(rx_info->page) + rx_info->page_offset; 1434 1435 prefetch(va); 1436 1437 if (len <= rx_ring->rx_copybreak) { 1438 skb = ena_alloc_skb(rx_ring, false); 1439 if (unlikely(!skb)) 1440 return NULL; 1441 1442 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1443 "RX allocated small packet. len %d. data_len %d\n", 1444 skb->len, skb->data_len); 1445 1446 /* sync this buffer for CPU use */ 1447 dma_sync_single_for_cpu(rx_ring->dev, 1448 dma_unmap_addr(&rx_info->ena_buf, paddr), 1449 len, 1450 DMA_FROM_DEVICE); 1451 skb_copy_to_linear_data(skb, va, len); 1452 dma_sync_single_for_device(rx_ring->dev, 1453 dma_unmap_addr(&rx_info->ena_buf, paddr), 1454 len, 1455 DMA_FROM_DEVICE); 1456 1457 skb_put(skb, len); 1458 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1459 rx_ring->free_ids[*next_to_clean] = req_id; 1460 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs, 1461 rx_ring->ring_size); 1462 return skb; 1463 } 1464 1465 skb = ena_alloc_skb(rx_ring, true); 1466 if (unlikely(!skb)) 1467 return NULL; 1468 1469 do { 1470 ena_unmap_rx_buff(rx_ring, rx_info); 1471 1472 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, 1473 rx_info->page_offset, len, ENA_PAGE_SIZE); 1474 1475 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1476 "RX skb updated. len %d. 
data_len %d\n", 1477 skb->len, skb->data_len); 1478 1479 rx_info->page = NULL; 1480 1481 rx_ring->free_ids[*next_to_clean] = req_id; 1482 *next_to_clean = 1483 ENA_RX_RING_IDX_NEXT(*next_to_clean, 1484 rx_ring->ring_size); 1485 if (likely(--descs == 0)) 1486 break; 1487 1488 buf++; 1489 len = ena_bufs[buf].len; 1490 req_id = ena_bufs[buf].req_id; 1491 1492 rx_info = &rx_ring->rx_buffer_info[req_id]; 1493 } while (1); 1494 1495 return skb; 1496 } 1497 1498 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum 1499 * @adapter: structure containing adapter specific data 1500 * @ena_rx_ctx: received packet context/metadata 1501 * @skb: skb currently being received and modified 1502 */ 1503 static void ena_rx_checksum(struct ena_ring *rx_ring, 1504 struct ena_com_rx_ctx *ena_rx_ctx, 1505 struct sk_buff *skb) 1506 { 1507 /* Rx csum disabled */ 1508 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { 1509 skb->ip_summed = CHECKSUM_NONE; 1510 return; 1511 } 1512 1513 /* For fragmented packets the checksum isn't valid */ 1514 if (ena_rx_ctx->frag) { 1515 skb->ip_summed = CHECKSUM_NONE; 1516 return; 1517 } 1518 1519 /* if IP and error */ 1520 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && 1521 (ena_rx_ctx->l3_csum_err))) { 1522 /* ipv4 checksum error */ 1523 skb->ip_summed = CHECKSUM_NONE; 1524 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, 1525 &rx_ring->syncp); 1526 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, 1527 "RX IPv4 header checksum error\n"); 1528 return; 1529 } 1530 1531 /* if TCP/UDP */ 1532 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 1533 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { 1534 if (unlikely(ena_rx_ctx->l4_csum_err)) { 1535 /* TCP/UDP checksum error */ 1536 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, 1537 &rx_ring->syncp); 1538 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, 1539 "RX L4 checksum error\n"); 1540 skb->ip_summed = CHECKSUM_NONE; 1541 return; 1542 } 1543 1544 if (likely(ena_rx_ctx->l4_csum_checked)) { 1545 skb->ip_summed = CHECKSUM_UNNECESSARY; 1546 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, 1547 &rx_ring->syncp); 1548 } else { 1549 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, 1550 &rx_ring->syncp); 1551 skb->ip_summed = CHECKSUM_NONE; 1552 } 1553 } else { 1554 skb->ip_summed = CHECKSUM_NONE; 1555 return; 1556 } 1557 1558 } 1559 1560 static void ena_set_rx_hash(struct ena_ring *rx_ring, 1561 struct ena_com_rx_ctx *ena_rx_ctx, 1562 struct sk_buff *skb) 1563 { 1564 enum pkt_hash_types hash_type; 1565 1566 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { 1567 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 1568 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) 1569 1570 hash_type = PKT_HASH_TYPE_L4; 1571 else 1572 hash_type = PKT_HASH_TYPE_NONE; 1573 1574 /* Override hash type if the packet is fragmented */ 1575 if (ena_rx_ctx->frag) 1576 hash_type = PKT_HASH_TYPE_NONE; 1577 1578 skb_set_hash(skb, ena_rx_ctx->hash, hash_type); 1579 } 1580 } 1581 1582 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) 1583 { 1584 struct ena_rx_buffer *rx_info; 1585 int ret; 1586 1587 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; 1588 xdp->data = page_address(rx_info->page) + rx_info->page_offset; 1589 xdp_set_data_meta_invalid(xdp); 1590 xdp->data_hard_start = page_address(rx_info->page); 1591 xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len; 1592 /* If for some reason we received a bigger packet 
than 1593 * we expect, then we simply drop it 1594 */ 1595 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) 1596 return XDP_DROP; 1597 1598 ret = ena_xdp_execute(rx_ring, xdp); 1599 1600 /* The xdp program might expand the headers */ 1601 if (ret == XDP_PASS) { 1602 rx_info->page_offset = xdp->data - xdp->data_hard_start; 1603 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; 1604 } 1605 1606 return ret; 1607 } 1608 /* ena_clean_rx_irq - Cleanup RX irq 1609 * @rx_ring: RX ring to clean 1610 * @napi: napi handler 1611 * @budget: how many packets driver is allowed to clean 1612 * 1613 * Returns the number of cleaned buffers. 1614 */ 1615 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, 1616 u32 budget) 1617 { 1618 u16 next_to_clean = rx_ring->next_to_clean; 1619 struct ena_com_rx_ctx ena_rx_ctx; 1620 struct ena_rx_buffer *rx_info; 1621 struct ena_adapter *adapter; 1622 u32 res_budget, work_done; 1623 int rx_copybreak_pkt = 0; 1624 int refill_threshold; 1625 struct sk_buff *skb; 1626 int refill_required; 1627 struct xdp_buff xdp; 1628 int xdp_flags = 0; 1629 int total_len = 0; 1630 int xdp_verdict; 1631 int rc = 0; 1632 int i; 1633 1634 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1635 "%s qid %d\n", __func__, rx_ring->qid); 1636 res_budget = budget; 1637 xdp.rxq = &rx_ring->xdp_rxq; 1638 xdp.frame_sz = ENA_PAGE_SIZE; 1639 1640 do { 1641 xdp_verdict = XDP_PASS; 1642 skb = NULL; 1643 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 1644 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 1645 ena_rx_ctx.descs = 0; 1646 ena_rx_ctx.pkt_offset = 0; 1647 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 1648 rx_ring->ena_com_io_sq, 1649 &ena_rx_ctx); 1650 if (unlikely(rc)) 1651 goto error; 1652 1653 if (unlikely(ena_rx_ctx.descs == 0)) 1654 break; 1655 1656 /* First descriptor might have an offset set by the device */ 1657 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; 1658 rx_info->page_offset += ena_rx_ctx.pkt_offset; 1659 1660 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 1661 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", 1662 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, 1663 ena_rx_ctx.l4_proto, ena_rx_ctx.hash); 1664 1665 if (ena_xdp_present_ring(rx_ring)) 1666 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); 1667 1668 /* allocate skb and fill it */ 1669 if (xdp_verdict == XDP_PASS) 1670 skb = ena_rx_skb(rx_ring, 1671 rx_ring->ena_bufs, 1672 ena_rx_ctx.descs, 1673 &next_to_clean); 1674 1675 if (unlikely(!skb)) { 1676 for (i = 0; i < ena_rx_ctx.descs; i++) { 1677 int req_id = rx_ring->ena_bufs[i].req_id; 1678 1679 rx_ring->free_ids[next_to_clean] = req_id; 1680 next_to_clean = 1681 ENA_RX_RING_IDX_NEXT(next_to_clean, 1682 rx_ring->ring_size); 1683 1684 /* Packets was passed for transmission, unmap it 1685 * from RX side. 
1686 */ 1687 if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) { 1688 ena_unmap_rx_buff(rx_ring, 1689 &rx_ring->rx_buffer_info[req_id]); 1690 rx_ring->rx_buffer_info[req_id].page = NULL; 1691 } 1692 } 1693 if (xdp_verdict != XDP_PASS) { 1694 xdp_flags |= xdp_verdict; 1695 res_budget--; 1696 continue; 1697 } 1698 break; 1699 } 1700 1701 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); 1702 1703 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); 1704 1705 skb_record_rx_queue(skb, rx_ring->qid); 1706 1707 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { 1708 total_len += rx_ring->ena_bufs[0].len; 1709 rx_copybreak_pkt++; 1710 napi_gro_receive(napi, skb); 1711 } else { 1712 total_len += skb->len; 1713 napi_gro_frags(napi); 1714 } 1715 1716 res_budget--; 1717 } while (likely(res_budget)); 1718 1719 work_done = budget - res_budget; 1720 rx_ring->per_napi_packets += work_done; 1721 u64_stats_update_begin(&rx_ring->syncp); 1722 rx_ring->rx_stats.bytes += total_len; 1723 rx_ring->rx_stats.cnt += work_done; 1724 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; 1725 u64_stats_update_end(&rx_ring->syncp); 1726 1727 rx_ring->next_to_clean = next_to_clean; 1728 1729 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 1730 refill_threshold = 1731 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, 1732 ENA_RX_REFILL_THRESH_PACKET); 1733 1734 /* Optimization, try to batch new rx buffers */ 1735 if (refill_required > refill_threshold) { 1736 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 1737 ena_refill_rx_bufs(rx_ring, refill_required); 1738 } 1739 1740 if (xdp_flags & XDP_REDIRECT) 1741 xdp_do_flush_map(); 1742 1743 return work_done; 1744 1745 error: 1746 adapter = netdev_priv(rx_ring->netdev); 1747 1748 if (rc == -ENOSPC) { 1749 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, 1750 &rx_ring->syncp); 1751 adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; 1752 } else { 1753 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, 1754 &rx_ring->syncp); 1755 adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 1756 } 1757 1758 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 1759 1760 return 0; 1761 } 1762 1763 static void ena_dim_work(struct work_struct *w) 1764 { 1765 struct dim *dim = container_of(w, struct dim, work); 1766 struct dim_cq_moder cur_moder = 1767 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 1768 struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim); 1769 1770 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; 1771 dim->state = DIM_START_MEASURE; 1772 } 1773 1774 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) 1775 { 1776 struct dim_sample dim_sample; 1777 struct ena_ring *rx_ring = ena_napi->rx_ring; 1778 1779 if (!rx_ring->per_napi_packets) 1780 return; 1781 1782 rx_ring->non_empty_napi_events++; 1783 1784 dim_update_sample(rx_ring->non_empty_napi_events, 1785 rx_ring->rx_stats.cnt, 1786 rx_ring->rx_stats.bytes, 1787 &dim_sample); 1788 1789 net_dim(&ena_napi->dim, dim_sample); 1790 1791 rx_ring->per_napi_packets = 0; 1792 } 1793 1794 static void ena_unmask_interrupt(struct ena_ring *tx_ring, 1795 struct ena_ring *rx_ring) 1796 { 1797 struct ena_eth_io_intr_reg intr_reg; 1798 u32 rx_interval = 0; 1799 /* Rx ring can be NULL when for XDP tx queues which don't have an 1800 * accompanying rx_ring pair. 1801 */ 1802 if (rx_ring) 1803 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? 
1804 rx_ring->smoothed_interval : 1805 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); 1806 1807 /* Update intr register: rx intr delay, 1808 * tx intr delay and interrupt unmask 1809 */ 1810 ena_com_update_intr_reg(&intr_reg, 1811 rx_interval, 1812 tx_ring->smoothed_interval, 1813 true); 1814 1815 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, 1816 &tx_ring->syncp); 1817 1818 /* It is a shared MSI-X. 1819 * Tx and Rx CQ have pointer to it. 1820 * So we use one of them to reach the intr reg 1821 * The Tx ring is used because the rx_ring is NULL for XDP queues 1822 */ 1823 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); 1824 } 1825 1826 static void ena_update_ring_numa_node(struct ena_ring *tx_ring, 1827 struct ena_ring *rx_ring) 1828 { 1829 int cpu = get_cpu(); 1830 int numa_node; 1831 1832 /* Check only one ring since the 2 rings are running on the same cpu */ 1833 if (likely(tx_ring->cpu == cpu)) 1834 goto out; 1835 1836 numa_node = cpu_to_node(cpu); 1837 put_cpu(); 1838 1839 if (numa_node != NUMA_NO_NODE) { 1840 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); 1841 if (rx_ring) 1842 ena_com_update_numa_node(rx_ring->ena_com_io_cq, 1843 numa_node); 1844 } 1845 1846 tx_ring->cpu = cpu; 1847 if (rx_ring) 1848 rx_ring->cpu = cpu; 1849 1850 return; 1851 out: 1852 put_cpu(); 1853 } 1854 1855 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) 1856 { 1857 u32 total_done = 0; 1858 u16 next_to_clean; 1859 u32 tx_bytes = 0; 1860 int tx_pkts = 0; 1861 u16 req_id; 1862 int rc; 1863 1864 if (unlikely(!xdp_ring)) 1865 return 0; 1866 next_to_clean = xdp_ring->next_to_clean; 1867 1868 while (tx_pkts < budget) { 1869 struct ena_tx_buffer *tx_info; 1870 struct xdp_frame *xdpf; 1871 1872 rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq, 1873 &req_id); 1874 if (rc) 1875 break; 1876 1877 rc = validate_xdp_req_id(xdp_ring, req_id); 1878 if (rc) 1879 break; 1880 1881 tx_info = &xdp_ring->tx_buffer_info[req_id]; 1882 xdpf = tx_info->xdpf; 1883 1884 tx_info->xdpf = NULL; 1885 tx_info->last_jiffies = 0; 1886 ena_unmap_tx_buff(xdp_ring, tx_info); 1887 1888 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, 1889 "tx_poll: q %d skb %p completed\n", xdp_ring->qid, 1890 xdpf); 1891 1892 tx_bytes += xdpf->len; 1893 tx_pkts++; 1894 total_done += tx_info->tx_descs; 1895 1896 xdp_return_frame(xdpf); 1897 xdp_ring->free_ids[next_to_clean] = req_id; 1898 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, 1899 xdp_ring->ring_size); 1900 } 1901 1902 xdp_ring->next_to_clean = next_to_clean; 1903 ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done); 1904 ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq); 1905 1906 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, 1907 "tx_poll: q %d done. 
total pkts: %d\n", 1908 xdp_ring->qid, tx_pkts); 1909 1910 return tx_pkts; 1911 } 1912 1913 static int ena_io_poll(struct napi_struct *napi, int budget) 1914 { 1915 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); 1916 struct ena_ring *tx_ring, *rx_ring; 1917 int tx_work_done; 1918 int rx_work_done = 0; 1919 int tx_budget; 1920 int napi_comp_call = 0; 1921 int ret; 1922 1923 tx_ring = ena_napi->tx_ring; 1924 rx_ring = ena_napi->rx_ring; 1925 1926 tx_ring->first_interrupt = ena_napi->first_interrupt; 1927 rx_ring->first_interrupt = ena_napi->first_interrupt; 1928 1929 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; 1930 1931 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || 1932 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { 1933 napi_complete_done(napi, 0); 1934 return 0; 1935 } 1936 1937 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); 1938 /* On netpoll the budget is zero and the handler should only clean the 1939 * tx completions. 1940 */ 1941 if (likely(budget)) 1942 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); 1943 1944 /* If the device is about to reset or down, avoid unmask 1945 * the interrupt and return 0 so NAPI won't reschedule 1946 */ 1947 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || 1948 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { 1949 napi_complete_done(napi, 0); 1950 ret = 0; 1951 1952 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { 1953 napi_comp_call = 1; 1954 1955 /* Update numa and unmask the interrupt only when schedule 1956 * from the interrupt context (vs from sk_busy_loop) 1957 */ 1958 if (napi_complete_done(napi, rx_work_done) && 1959 READ_ONCE(ena_napi->interrupts_masked)) { 1960 smp_rmb(); /* make sure interrupts_masked is read */ 1961 WRITE_ONCE(ena_napi->interrupts_masked, false); 1962 /* We apply adaptive moderation on Rx path only. 1963 * Tx uses static interrupt moderation. 1964 */ 1965 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) 1966 ena_adjust_adaptive_rx_intr_moderation(ena_napi); 1967 1968 ena_unmask_interrupt(tx_ring, rx_ring); 1969 } 1970 1971 ena_update_ring_numa_node(tx_ring, rx_ring); 1972 1973 ret = rx_work_done; 1974 } else { 1975 ret = budget; 1976 } 1977 1978 u64_stats_update_begin(&tx_ring->syncp); 1979 tx_ring->tx_stats.napi_comp += napi_comp_call; 1980 tx_ring->tx_stats.tx_poll++; 1981 u64_stats_update_end(&tx_ring->syncp); 1982 1983 return ret; 1984 } 1985 1986 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data) 1987 { 1988 struct ena_adapter *adapter = (struct ena_adapter *)data; 1989 1990 ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1991 1992 /* Don't call the aenq handler before probe is done */ 1993 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) 1994 ena_com_aenq_intr_handler(adapter->ena_dev, data); 1995 1996 return IRQ_HANDLED; 1997 } 1998 1999 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx 2000 * @irq: interrupt number 2001 * @data: pointer to a network interface private napi device structure 2002 */ 2003 static irqreturn_t ena_intr_msix_io(int irq, void *data) 2004 { 2005 struct ena_napi *ena_napi = data; 2006 2007 ena_napi->first_interrupt = true; 2008 2009 WRITE_ONCE(ena_napi->interrupts_masked, true); 2010 smp_wmb(); /* write interrupts_masked before calling napi */ 2011 2012 napi_schedule_irqoff(&ena_napi->napi); 2013 2014 return IRQ_HANDLED; 2015 } 2016 2017 /* Reserve a single MSI-X vector for management (admin + aenq). 
2018 * plus reserve one vector for each potential io queue. 2019 * the number of potential io queues is the minimum of what the device 2020 * supports and the number of vCPUs. 2021 */ 2022 static int ena_enable_msix(struct ena_adapter *adapter) 2023 { 2024 int msix_vecs, irq_cnt; 2025 2026 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { 2027 netif_err(adapter, probe, adapter->netdev, 2028 "Error, MSI-X is already enabled\n"); 2029 return -EPERM; 2030 } 2031 2032 /* Reserved the max msix vectors we might need */ 2033 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); 2034 netif_dbg(adapter, probe, adapter->netdev, 2035 "Trying to enable MSI-X, vectors %d\n", msix_vecs); 2036 2037 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC, 2038 msix_vecs, PCI_IRQ_MSIX); 2039 2040 if (irq_cnt < 0) { 2041 netif_err(adapter, probe, adapter->netdev, 2042 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt); 2043 return -ENOSPC; 2044 } 2045 2046 if (irq_cnt != msix_vecs) { 2047 netif_notice(adapter, probe, adapter->netdev, 2048 "Enable only %d MSI-X (out of %d), reduce the number of queues\n", 2049 irq_cnt, msix_vecs); 2050 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; 2051 } 2052 2053 if (ena_init_rx_cpu_rmap(adapter)) 2054 netif_warn(adapter, probe, adapter->netdev, 2055 "Failed to map IRQs to CPUs\n"); 2056 2057 adapter->msix_vecs = irq_cnt; 2058 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); 2059 2060 return 0; 2061 } 2062 2063 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) 2064 { 2065 u32 cpu; 2066 2067 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 2068 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 2069 pci_name(adapter->pdev)); 2070 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = 2071 ena_intr_msix_mgmnt; 2072 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 2073 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 2074 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); 2075 cpu = cpumask_first(cpu_online_mask); 2076 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; 2077 cpumask_set_cpu(cpu, 2078 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); 2079 } 2080 2081 static void ena_setup_io_intr(struct ena_adapter *adapter) 2082 { 2083 struct net_device *netdev; 2084 int irq_idx, i, cpu; 2085 int io_queue_count; 2086 2087 netdev = adapter->netdev; 2088 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; 2089 2090 for (i = 0; i < io_queue_count; i++) { 2091 irq_idx = ENA_IO_IRQ_IDX(i); 2092 cpu = i % num_online_cpus(); 2093 2094 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 2095 "%s-Tx-Rx-%d", netdev->name, i); 2096 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; 2097 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; 2098 adapter->irq_tbl[irq_idx].vector = 2099 pci_irq_vector(adapter->pdev, irq_idx); 2100 adapter->irq_tbl[irq_idx].cpu = cpu; 2101 2102 cpumask_set_cpu(cpu, 2103 &adapter->irq_tbl[irq_idx].affinity_hint_mask); 2104 } 2105 } 2106 2107 static int ena_request_mgmnt_irq(struct ena_adapter *adapter) 2108 { 2109 unsigned long flags = 0; 2110 struct ena_irq *irq; 2111 int rc; 2112 2113 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 2114 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 2115 irq->data); 2116 if (rc) { 2117 netif_err(adapter, probe, adapter->netdev, 2118 "Failed to request admin irq\n"); 2119 return rc; 2120 } 2121 2122 netif_dbg(adapter, probe, adapter->netdev, 2123 "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n", 2124 irq->affinity_hint_mask.bits[0], irq->vector); 2125 2126 
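/* Publish the affinity hint so irqbalance (or an administrator) can keep the management IRQ on the CPU chosen above */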
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); 2127 2128 return rc; 2129 } 2130 2131 static int ena_request_io_irq(struct ena_adapter *adapter) 2132 { 2133 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; 2134 unsigned long flags = 0; 2135 struct ena_irq *irq; 2136 int rc = 0, i, k; 2137 2138 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { 2139 netif_err(adapter, ifup, adapter->netdev, 2140 "Failed to request I/O IRQ: MSI-X is not enabled\n"); 2141 return -EINVAL; 2142 } 2143 2144 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { 2145 irq = &adapter->irq_tbl[i]; 2146 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 2147 irq->data); 2148 if (rc) { 2149 netif_err(adapter, ifup, adapter->netdev, 2150 "Failed to request I/O IRQ. index %d rc %d\n", 2151 i, rc); 2152 goto err; 2153 } 2154 2155 netif_dbg(adapter, ifup, adapter->netdev, 2156 "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n", 2157 i, irq->affinity_hint_mask.bits[0], irq->vector); 2158 2159 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); 2160 } 2161 2162 return rc; 2163 2164 err: 2165 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { 2166 irq = &adapter->irq_tbl[k]; 2167 free_irq(irq->vector, irq->data); 2168 } 2169 2170 return rc; 2171 } 2172 2173 static void ena_free_mgmnt_irq(struct ena_adapter *adapter) 2174 { 2175 struct ena_irq *irq; 2176 2177 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 2178 synchronize_irq(irq->vector); 2179 irq_set_affinity_hint(irq->vector, NULL); 2180 free_irq(irq->vector, irq->data); 2181 } 2182 2183 static void ena_free_io_irq(struct ena_adapter *adapter) 2184 { 2185 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; 2186 struct ena_irq *irq; 2187 int i; 2188 2189 #ifdef CONFIG_RFS_ACCEL 2190 if (adapter->msix_vecs >= 1) { 2191 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); 2192 adapter->netdev->rx_cpu_rmap = NULL; 2193 } 2194 #endif /* CONFIG_RFS_ACCEL */ 2195 2196 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { 2197 irq = &adapter->irq_tbl[i]; 2198 irq_set_affinity_hint(irq->vector, NULL); 2199 free_irq(irq->vector, irq->data); 2200 } 2201 } 2202 2203 static void ena_disable_msix(struct ena_adapter *adapter) 2204 { 2205 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) 2206 pci_free_irq_vectors(adapter->pdev); 2207 } 2208 2209 static void ena_disable_io_intr_sync(struct ena_adapter *adapter) 2210 { 2211 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; 2212 int i; 2213 2214 if (!netif_running(adapter->netdev)) 2215 return; 2216 2217 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) 2218 synchronize_irq(adapter->irq_tbl[i].vector); 2219 } 2220 2221 static void ena_del_napi_in_range(struct ena_adapter *adapter, 2222 int first_index, 2223 int count) 2224 { 2225 int i; 2226 2227 for (i = first_index; i < first_index + count; i++) { 2228 netif_napi_del(&adapter->ena_napi[i].napi); 2229 2230 WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) && 2231 adapter->ena_napi[i].xdp_ring); 2232 } 2233 } 2234 2235 static void ena_init_napi_in_range(struct ena_adapter *adapter, 2236 int first_index, int count) 2237 { 2238 int i; 2239 2240 for (i = first_index; i < first_index + count; i++) { 2241 struct ena_napi *napi = &adapter->ena_napi[i]; 2242 2243 netif_napi_add(adapter->netdev, 2244 &napi->napi, 2245 ENA_IS_XDP_INDEX(adapter, i) ? 
ena_xdp_io_poll : ena_io_poll, 2246 ENA_NAPI_BUDGET); 2247 2248 if (!ENA_IS_XDP_INDEX(adapter, i)) { 2249 napi->rx_ring = &adapter->rx_ring[i]; 2250 napi->tx_ring = &adapter->tx_ring[i]; 2251 } else { 2252 napi->xdp_ring = &adapter->tx_ring[i]; 2253 } 2254 napi->qid = i; 2255 } 2256 } 2257 2258 static void ena_napi_disable_in_range(struct ena_adapter *adapter, 2259 int first_index, 2260 int count) 2261 { 2262 int i; 2263 2264 for (i = first_index; i < first_index + count; i++) 2265 napi_disable(&adapter->ena_napi[i].napi); 2266 } 2267 2268 static void ena_napi_enable_in_range(struct ena_adapter *adapter, 2269 int first_index, 2270 int count) 2271 { 2272 int i; 2273 2274 for (i = first_index; i < first_index + count; i++) 2275 napi_enable(&adapter->ena_napi[i].napi); 2276 } 2277 2278 /* Configure the Rx forwarding */ 2279 static int ena_rss_configure(struct ena_adapter *adapter) 2280 { 2281 struct ena_com_dev *ena_dev = adapter->ena_dev; 2282 int rc; 2283 2284 /* In case the RSS table wasn't initialized by probe */ 2285 if (!ena_dev->rss.tbl_log_size) { 2286 rc = ena_rss_init_default(adapter); 2287 if (rc && (rc != -EOPNOTSUPP)) { 2288 netif_err(adapter, ifup, adapter->netdev, 2289 "Failed to init RSS rc: %d\n", rc); 2290 return rc; 2291 } 2292 } 2293 2294 /* Set indirect table */ 2295 rc = ena_com_indirect_table_set(ena_dev); 2296 if (unlikely(rc && rc != -EOPNOTSUPP)) 2297 return rc; 2298 2299 /* Configure hash function (if supported) */ 2300 rc = ena_com_set_hash_function(ena_dev); 2301 if (unlikely(rc && (rc != -EOPNOTSUPP))) 2302 return rc; 2303 2304 /* Configure hash inputs (if supported) */ 2305 rc = ena_com_set_hash_ctrl(ena_dev); 2306 if (unlikely(rc && (rc != -EOPNOTSUPP))) 2307 return rc; 2308 2309 return 0; 2310 } 2311 2312 static int ena_up_complete(struct ena_adapter *adapter) 2313 { 2314 int rc; 2315 2316 rc = ena_rss_configure(adapter); 2317 if (rc) 2318 return rc; 2319 2320 ena_change_mtu(adapter->netdev, adapter->netdev->mtu); 2321 2322 ena_refill_all_rx_bufs(adapter); 2323 2324 /* enable transmits */ 2325 netif_tx_start_all_queues(adapter->netdev); 2326 2327 ena_napi_enable_in_range(adapter, 2328 0, 2329 adapter->xdp_num_queues + adapter->num_io_queues); 2330 2331 return 0; 2332 } 2333 2334 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) 2335 { 2336 struct ena_com_create_io_ctx ctx; 2337 struct ena_com_dev *ena_dev; 2338 struct ena_ring *tx_ring; 2339 u32 msix_vector; 2340 u16 ena_qid; 2341 int rc; 2342 2343 ena_dev = adapter->ena_dev; 2344 2345 tx_ring = &adapter->tx_ring[qid]; 2346 msix_vector = ENA_IO_IRQ_IDX(qid); 2347 ena_qid = ENA_IO_TXQ_IDX(qid); 2348 2349 memset(&ctx, 0x0, sizeof(ctx)); 2350 2351 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 2352 ctx.qid = ena_qid; 2353 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 2354 ctx.msix_vector = msix_vector; 2355 ctx.queue_size = tx_ring->ring_size; 2356 ctx.numa_node = cpu_to_node(tx_ring->cpu); 2357 2358 rc = ena_com_create_io_queue(ena_dev, &ctx); 2359 if (rc) { 2360 netif_err(adapter, ifup, adapter->netdev, 2361 "Failed to create I/O TX queue num %d rc: %d\n", 2362 qid, rc); 2363 return rc; 2364 } 2365 2366 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 2367 &tx_ring->ena_com_io_sq, 2368 &tx_ring->ena_com_io_cq); 2369 if (rc) { 2370 netif_err(adapter, ifup, adapter->netdev, 2371 "Failed to get TX queue handlers. 
TX queue num %d rc: %d\n", 2372 qid, rc); 2373 ena_com_destroy_io_queue(ena_dev, ena_qid); 2374 return rc; 2375 } 2376 2377 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); 2378 return rc; 2379 } 2380 2381 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, 2382 int first_index, int count) 2383 { 2384 struct ena_com_dev *ena_dev = adapter->ena_dev; 2385 int rc, i; 2386 2387 for (i = first_index; i < first_index + count; i++) { 2388 rc = ena_create_io_tx_queue(adapter, i); 2389 if (rc) 2390 goto create_err; 2391 } 2392 2393 return 0; 2394 2395 create_err: 2396 while (i-- > first_index) 2397 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 2398 2399 return rc; 2400 } 2401 2402 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) 2403 { 2404 struct ena_com_dev *ena_dev; 2405 struct ena_com_create_io_ctx ctx; 2406 struct ena_ring *rx_ring; 2407 u32 msix_vector; 2408 u16 ena_qid; 2409 int rc; 2410 2411 ena_dev = adapter->ena_dev; 2412 2413 rx_ring = &adapter->rx_ring[qid]; 2414 msix_vector = ENA_IO_IRQ_IDX(qid); 2415 ena_qid = ENA_IO_RXQ_IDX(qid); 2416 2417 memset(&ctx, 0x0, sizeof(ctx)); 2418 2419 ctx.qid = ena_qid; 2420 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 2421 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2422 ctx.msix_vector = msix_vector; 2423 ctx.queue_size = rx_ring->ring_size; 2424 ctx.numa_node = cpu_to_node(rx_ring->cpu); 2425 2426 rc = ena_com_create_io_queue(ena_dev, &ctx); 2427 if (rc) { 2428 netif_err(adapter, ifup, adapter->netdev, 2429 "Failed to create I/O RX queue num %d rc: %d\n", 2430 qid, rc); 2431 return rc; 2432 } 2433 2434 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 2435 &rx_ring->ena_com_io_sq, 2436 &rx_ring->ena_com_io_cq); 2437 if (rc) { 2438 netif_err(adapter, ifup, adapter->netdev, 2439 "Failed to get RX queue handlers. RX queue num %d rc: %d\n", 2440 qid, rc); 2441 goto err; 2442 } 2443 2444 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); 2445 2446 return rc; 2447 err: 2448 ena_com_destroy_io_queue(ena_dev, ena_qid); 2449 return rc; 2450 } 2451 2452 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) 2453 { 2454 struct ena_com_dev *ena_dev = adapter->ena_dev; 2455 int rc, i; 2456 2457 for (i = 0; i < adapter->num_io_queues; i++) { 2458 rc = ena_create_io_rx_queue(adapter, i); 2459 if (rc) 2460 goto create_err; 2461 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); 2462 } 2463 2464 return 0; 2465 2466 create_err: 2467 while (i--) { 2468 cancel_work_sync(&adapter->ena_napi[i].dim.work); 2469 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 2470 } 2471 2472 return rc; 2473 } 2474 2475 static void set_io_rings_size(struct ena_adapter *adapter, 2476 int new_tx_size, 2477 int new_rx_size) 2478 { 2479 int i; 2480 2481 for (i = 0; i < adapter->num_io_queues; i++) { 2482 adapter->tx_ring[i].ring_size = new_tx_size; 2483 adapter->rx_ring[i].ring_size = new_rx_size; 2484 } 2485 } 2486 2487 /* This function allows queue allocation to backoff when the system is 2488 * low on memory. If there is not enough memory to allocate io queues 2489 * the driver will try to allocate smaller queues. 2490 * 2491 * The backoff algorithm is as follows: 2492 * 1. Try to allocate TX and RX and if successful. 2493 * 1.1. return success 2494 * 2495 * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same). 2496 * 2497 * 3. If TX or RX is smaller than 256 2498 * 3.1. return failure. 2499 * 4. else 2500 * 4.1. go back to 1. 
2501 */ 2502 static int create_queues_with_size_backoff(struct ena_adapter *adapter) 2503 { 2504 int rc, cur_rx_ring_size, cur_tx_ring_size; 2505 int new_rx_ring_size, new_tx_ring_size; 2506 2507 /* current queue sizes might be set to smaller than the requested 2508 * ones due to past queue allocation failures. 2509 */ 2510 set_io_rings_size(adapter, adapter->requested_tx_ring_size, 2511 adapter->requested_rx_ring_size); 2512 2513 while (1) { 2514 if (ena_xdp_present(adapter)) { 2515 rc = ena_setup_and_create_all_xdp_queues(adapter); 2516 2517 if (rc) 2518 goto err_setup_tx; 2519 } 2520 rc = ena_setup_tx_resources_in_range(adapter, 2521 0, 2522 adapter->num_io_queues); 2523 if (rc) 2524 goto err_setup_tx; 2525 2526 rc = ena_create_io_tx_queues_in_range(adapter, 2527 0, 2528 adapter->num_io_queues); 2529 if (rc) 2530 goto err_create_tx_queues; 2531 2532 rc = ena_setup_all_rx_resources(adapter); 2533 if (rc) 2534 goto err_setup_rx; 2535 2536 rc = ena_create_all_io_rx_queues(adapter); 2537 if (rc) 2538 goto err_create_rx_queues; 2539 2540 return 0; 2541 2542 err_create_rx_queues: 2543 ena_free_all_io_rx_resources(adapter); 2544 err_setup_rx: 2545 ena_destroy_all_tx_queues(adapter); 2546 err_create_tx_queues: 2547 ena_free_all_io_tx_resources(adapter); 2548 err_setup_tx: 2549 if (rc != -ENOMEM) { 2550 netif_err(adapter, ifup, adapter->netdev, 2551 "Queue creation failed with error code %d\n", 2552 rc); 2553 return rc; 2554 } 2555 2556 cur_tx_ring_size = adapter->tx_ring[0].ring_size; 2557 cur_rx_ring_size = adapter->rx_ring[0].ring_size; 2558 2559 netif_err(adapter, ifup, adapter->netdev, 2560 "Not enough memory to create queues with sizes TX=%d, RX=%d\n", 2561 cur_tx_ring_size, cur_rx_ring_size); 2562 2563 new_tx_ring_size = cur_tx_ring_size; 2564 new_rx_ring_size = cur_rx_ring_size; 2565 2566 /* Decrease the size of the larger queue, or 2567 * decrease both if they are the same size. 2568 */ 2569 if (cur_rx_ring_size <= cur_tx_ring_size) 2570 new_tx_ring_size = cur_tx_ring_size / 2; 2571 if (cur_rx_ring_size >= cur_tx_ring_size) 2572 new_rx_ring_size = cur_rx_ring_size / 2; 2573 2574 if (new_tx_ring_size < ENA_MIN_RING_SIZE || 2575 new_rx_ring_size < ENA_MIN_RING_SIZE) { 2576 netif_err(adapter, ifup, adapter->netdev, 2577 "Queue creation failed with the smallest possible queue size of %d for both queues. 
Not retrying with smaller queues\n", 2578 ENA_MIN_RING_SIZE); 2579 return rc; 2580 } 2581 2582 netif_err(adapter, ifup, adapter->netdev, 2583 "Retrying queue creation with sizes TX=%d, RX=%d\n", 2584 new_tx_ring_size, 2585 new_rx_ring_size); 2586 2587 set_io_rings_size(adapter, new_tx_ring_size, 2588 new_rx_ring_size); 2589 } 2590 } 2591 2592 static int ena_up(struct ena_adapter *adapter) 2593 { 2594 int io_queue_count, rc, i; 2595 2596 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); 2597 2598 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; 2599 ena_setup_io_intr(adapter); 2600 2601 /* napi poll functions should be initialized before running 2602 * request_irq(), to handle a rare condition where there is a pending 2603 * interrupt, causing the ISR to fire immediately while the poll 2604 * function wasn't set yet, causing a null dereference 2605 */ 2606 ena_init_napi_in_range(adapter, 0, io_queue_count); 2607 2608 rc = ena_request_io_irq(adapter); 2609 if (rc) 2610 goto err_req_irq; 2611 2612 rc = create_queues_with_size_backoff(adapter); 2613 if (rc) 2614 goto err_create_queues_with_backoff; 2615 2616 rc = ena_up_complete(adapter); 2617 if (rc) 2618 goto err_up; 2619 2620 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) 2621 netif_carrier_on(adapter->netdev); 2622 2623 ena_increase_stat(&adapter->dev_stats.interface_up, 1, 2624 &adapter->syncp); 2625 2626 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2627 2628 /* Enable completion queues interrupt */ 2629 for (i = 0; i < adapter->num_io_queues; i++) 2630 ena_unmask_interrupt(&adapter->tx_ring[i], 2631 &adapter->rx_ring[i]); 2632 2633 /* schedule napi in case we had pending packets 2634 * from the last time we disable napi 2635 */ 2636 for (i = 0; i < io_queue_count; i++) 2637 napi_schedule(&adapter->ena_napi[i].napi); 2638 2639 return rc; 2640 2641 err_up: 2642 ena_destroy_all_tx_queues(adapter); 2643 ena_free_all_io_tx_resources(adapter); 2644 ena_destroy_all_rx_queues(adapter); 2645 ena_free_all_io_rx_resources(adapter); 2646 err_create_queues_with_backoff: 2647 ena_free_io_irq(adapter); 2648 err_req_irq: 2649 ena_del_napi_in_range(adapter, 0, io_queue_count); 2650 2651 return rc; 2652 } 2653 2654 static void ena_down(struct ena_adapter *adapter) 2655 { 2656 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; 2657 2658 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); 2659 2660 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2661 2662 ena_increase_stat(&adapter->dev_stats.interface_down, 1, 2663 &adapter->syncp); 2664 2665 netif_carrier_off(adapter->netdev); 2666 netif_tx_disable(adapter->netdev); 2667 2668 /* After this point the napi handler won't enable the tx queue */ 2669 ena_napi_disable_in_range(adapter, 0, io_queue_count); 2670 2671 /* After destroy the queue there won't be any new interrupts */ 2672 2673 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { 2674 int rc; 2675 2676 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 2677 if (rc) 2678 netif_err(adapter, ifdown, adapter->netdev, 2679 "Device reset failed\n"); 2680 /* stop submitting admin commands on a device that was reset */ 2681 ena_com_set_admin_running_state(adapter->ena_dev, false); 2682 } 2683 2684 ena_destroy_all_io_queues(adapter); 2685 2686 ena_disable_io_intr_sync(adapter); 2687 ena_free_io_irq(adapter); 2688 ena_del_napi_in_range(adapter, 0, io_queue_count); 2689 2690 ena_free_all_tx_bufs(adapter); 2691 ena_free_all_rx_bufs(adapter); 2692 ena_free_all_io_tx_resources(adapter); 
2693 ena_free_all_io_rx_resources(adapter); 2694 } 2695 2696 /* ena_open - Called when a network interface is made active 2697 * @netdev: network interface device structure 2698 * 2699 * Returns 0 on success, negative value on failure 2700 * 2701 * The open entry point is called when a network interface is made 2702 * active by the system (IFF_UP). At this point all resources needed 2703 * for transmit and receive operations are allocated, the interrupt 2704 * handler is registered with the OS, the watchdog timer is started, 2705 * and the stack is notified that the interface is ready. 2706 */ 2707 static int ena_open(struct net_device *netdev) 2708 { 2709 struct ena_adapter *adapter = netdev_priv(netdev); 2710 int rc; 2711 2712 /* Notify the stack of the actual queue counts. */ 2713 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues); 2714 if (rc) { 2715 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); 2716 return rc; 2717 } 2718 2719 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues); 2720 if (rc) { 2721 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); 2722 return rc; 2723 } 2724 2725 rc = ena_up(adapter); 2726 if (rc) 2727 return rc; 2728 2729 return rc; 2730 } 2731 2732 /* ena_close - Disables a network interface 2733 * @netdev: network interface device structure 2734 * 2735 * Returns 0, this is not allowed to fail 2736 * 2737 * The close entry point is called when an interface is de-activated 2738 * by the OS. The hardware is still under the drivers control, but 2739 * needs to be disabled. A global MAC reset is issued to stop the 2740 * hardware, and all transmit and receive resources are freed. 2741 */ 2742 static int ena_close(struct net_device *netdev) 2743 { 2744 struct ena_adapter *adapter = netdev_priv(netdev); 2745 2746 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); 2747 2748 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) 2749 return 0; 2750 2751 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2752 ena_down(adapter); 2753 2754 /* Check for device status and issue reset if needed*/ 2755 check_for_admin_com_state(adapter); 2756 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 2757 netif_err(adapter, ifdown, adapter->netdev, 2758 "Destroy failure, restarting device\n"); 2759 ena_dump_stats_to_dmesg(adapter); 2760 /* rtnl lock already obtained in dev_ioctl() layer */ 2761 ena_destroy_device(adapter, false); 2762 ena_restore_device(adapter); 2763 } 2764 2765 return 0; 2766 } 2767 2768 int ena_update_queue_sizes(struct ena_adapter *adapter, 2769 u32 new_tx_size, 2770 u32 new_rx_size) 2771 { 2772 bool dev_was_up; 2773 2774 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2775 ena_close(adapter->netdev); 2776 adapter->requested_tx_ring_size = new_tx_size; 2777 adapter->requested_rx_ring_size = new_rx_size; 2778 ena_init_io_rings(adapter, 2779 0, 2780 adapter->xdp_num_queues + 2781 adapter->num_io_queues); 2782 return dev_was_up ? 
ena_up(adapter) : 0; 2783 } 2784 2785 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) 2786 { 2787 struct ena_com_dev *ena_dev = adapter->ena_dev; 2788 int prev_channel_count; 2789 bool dev_was_up; 2790 2791 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2792 ena_close(adapter->netdev); 2793 prev_channel_count = adapter->num_io_queues; 2794 adapter->num_io_queues = new_channel_count; 2795 if (ena_xdp_present(adapter) && 2796 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) { 2797 adapter->xdp_first_ring = new_channel_count; 2798 adapter->xdp_num_queues = new_channel_count; 2799 if (prev_channel_count > new_channel_count) 2800 ena_xdp_exchange_program_rx_in_range(adapter, 2801 NULL, 2802 new_channel_count, 2803 prev_channel_count); 2804 else 2805 ena_xdp_exchange_program_rx_in_range(adapter, 2806 adapter->xdp_bpf_prog, 2807 prev_channel_count, 2808 new_channel_count); 2809 } 2810 2811 /* We need to destroy the rss table so that the indirection 2812 * table will be reinitialized by ena_up() 2813 */ 2814 ena_com_rss_destroy(ena_dev); 2815 ena_init_io_rings(adapter, 2816 0, 2817 adapter->xdp_num_queues + 2818 adapter->num_io_queues); 2819 return dev_was_up ? ena_open(adapter->netdev) : 0; 2820 } 2821 2822 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, 2823 struct sk_buff *skb, 2824 bool disable_meta_caching) 2825 { 2826 u32 mss = skb_shinfo(skb)->gso_size; 2827 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 2828 u8 l4_protocol = 0; 2829 2830 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { 2831 ena_tx_ctx->l4_csum_enable = 1; 2832 if (mss) { 2833 ena_tx_ctx->tso_enable = 1; 2834 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; 2835 ena_tx_ctx->l4_csum_partial = 0; 2836 } else { 2837 ena_tx_ctx->tso_enable = 0; 2838 ena_meta->l4_hdr_len = 0; 2839 ena_tx_ctx->l4_csum_partial = 1; 2840 } 2841 2842 switch (ip_hdr(skb)->version) { 2843 case IPVERSION: 2844 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 2845 if (ip_hdr(skb)->frag_off & htons(IP_DF)) 2846 ena_tx_ctx->df = 1; 2847 if (mss) 2848 ena_tx_ctx->l3_csum_enable = 1; 2849 l4_protocol = ip_hdr(skb)->protocol; 2850 break; 2851 case 6: 2852 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 2853 l4_protocol = ipv6_hdr(skb)->nexthdr; 2854 break; 2855 default: 2856 break; 2857 } 2858 2859 if (l4_protocol == IPPROTO_TCP) 2860 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 2861 else 2862 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 2863 2864 ena_meta->mss = mss; 2865 ena_meta->l3_hdr_len = skb_network_header_len(skb); 2866 ena_meta->l3_hdr_offset = skb_network_offset(skb); 2867 ena_tx_ctx->meta_valid = 1; 2868 } else if (disable_meta_caching) { 2869 memset(ena_meta, 0, sizeof(*ena_meta)); 2870 ena_tx_ctx->meta_valid = 1; 2871 } else { 2872 ena_tx_ctx->meta_valid = 0; 2873 } 2874 } 2875 2876 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, 2877 struct sk_buff *skb) 2878 { 2879 int num_frags, header_len, rc; 2880 2881 num_frags = skb_shinfo(skb)->nr_frags; 2882 header_len = skb_headlen(skb); 2883 2884 if (num_frags < tx_ring->sgl_size) 2885 return 0; 2886 2887 if ((num_frags == tx_ring->sgl_size) && 2888 (header_len < tx_ring->tx_max_header_size)) 2889 return 0; 2890 2891 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); 2892 2893 rc = skb_linearize(skb); 2894 if (unlikely(rc)) { 2895 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, 2896 &tx_ring->syncp); 2897 } 2898 2899 return rc; 2900 } 2901 2902 static int ena_tx_map_skb(struct ena_ring *tx_ring, 
2903 struct ena_tx_buffer *tx_info, 2904 struct sk_buff *skb, 2905 void **push_hdr, 2906 u16 *header_len) 2907 { 2908 struct ena_adapter *adapter = tx_ring->adapter; 2909 struct ena_com_buf *ena_buf; 2910 dma_addr_t dma; 2911 u32 skb_head_len, frag_len, last_frag; 2912 u16 push_len = 0; 2913 u16 delta = 0; 2914 int i = 0; 2915 2916 skb_head_len = skb_headlen(skb); 2917 tx_info->skb = skb; 2918 ena_buf = tx_info->bufs; 2919 2920 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2921 /* When the device is LLQ mode, the driver will copy 2922 * the header into the device memory space. 2923 * the ena_com layer assume the header is in a linear 2924 * memory space. 2925 * This assumption might be wrong since part of the header 2926 * can be in the fragmented buffers. 2927 * Use skb_header_pointer to make sure the header is in a 2928 * linear memory space. 2929 */ 2930 2931 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); 2932 *push_hdr = skb_header_pointer(skb, 0, push_len, 2933 tx_ring->push_buf_intermediate_buf); 2934 *header_len = push_len; 2935 if (unlikely(skb->data != *push_hdr)) { 2936 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, 2937 &tx_ring->syncp); 2938 2939 delta = push_len - skb_head_len; 2940 } 2941 } else { 2942 *push_hdr = NULL; 2943 *header_len = min_t(u32, skb_head_len, 2944 tx_ring->tx_max_header_size); 2945 } 2946 2947 netif_dbg(adapter, tx_queued, adapter->netdev, 2948 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, 2949 *push_hdr, push_len); 2950 2951 if (skb_head_len > push_len) { 2952 dma = dma_map_single(tx_ring->dev, skb->data + push_len, 2953 skb_head_len - push_len, DMA_TO_DEVICE); 2954 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) 2955 goto error_report_dma_error; 2956 2957 ena_buf->paddr = dma; 2958 ena_buf->len = skb_head_len - push_len; 2959 2960 ena_buf++; 2961 tx_info->num_of_bufs++; 2962 tx_info->map_linear_data = 1; 2963 } else { 2964 tx_info->map_linear_data = 0; 2965 } 2966 2967 last_frag = skb_shinfo(skb)->nr_frags; 2968 2969 for (i = 0; i < last_frag; i++) { 2970 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2971 2972 frag_len = skb_frag_size(frag); 2973 2974 if (unlikely(delta >= frag_len)) { 2975 delta -= frag_len; 2976 continue; 2977 } 2978 2979 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, 2980 frag_len - delta, DMA_TO_DEVICE); 2981 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) 2982 goto error_report_dma_error; 2983 2984 ena_buf->paddr = dma; 2985 ena_buf->len = frag_len - delta; 2986 ena_buf++; 2987 tx_info->num_of_bufs++; 2988 delta = 0; 2989 } 2990 2991 return 0; 2992 2993 error_report_dma_error: 2994 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, 2995 &tx_ring->syncp); 2996 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); 2997 2998 tx_info->skb = NULL; 2999 3000 tx_info->num_of_bufs += i; 3001 ena_unmap_tx_buff(tx_ring, tx_info); 3002 3003 return -EINVAL; 3004 } 3005 3006 /* Called with netif_tx_lock. 
*/ 3007 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) 3008 { 3009 struct ena_adapter *adapter = netdev_priv(dev); 3010 struct ena_tx_buffer *tx_info; 3011 struct ena_com_tx_ctx ena_tx_ctx; 3012 struct ena_ring *tx_ring; 3013 struct netdev_queue *txq; 3014 void *push_hdr; 3015 u16 next_to_use, req_id, header_len; 3016 int qid, rc; 3017 3018 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); 3019 /* Determine which tx ring we will be placed on */ 3020 qid = skb_get_queue_mapping(skb); 3021 tx_ring = &adapter->tx_ring[qid]; 3022 txq = netdev_get_tx_queue(dev, qid); 3023 3024 rc = ena_check_and_linearize_skb(tx_ring, skb); 3025 if (unlikely(rc)) 3026 goto error_drop_packet; 3027 3028 skb_tx_timestamp(skb); 3029 3030 next_to_use = tx_ring->next_to_use; 3031 req_id = tx_ring->free_ids[next_to_use]; 3032 tx_info = &tx_ring->tx_buffer_info[req_id]; 3033 tx_info->num_of_bufs = 0; 3034 3035 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); 3036 3037 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); 3038 if (unlikely(rc)) 3039 goto error_drop_packet; 3040 3041 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 3042 ena_tx_ctx.ena_bufs = tx_info->bufs; 3043 ena_tx_ctx.push_header = push_hdr; 3044 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 3045 ena_tx_ctx.req_id = req_id; 3046 ena_tx_ctx.header_len = header_len; 3047 3048 /* set flags and meta data */ 3049 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); 3050 3051 rc = ena_xmit_common(dev, 3052 tx_ring, 3053 tx_info, 3054 &ena_tx_ctx, 3055 next_to_use, 3056 skb->len); 3057 if (rc) 3058 goto error_unmap_dma; 3059 3060 netdev_tx_sent_queue(txq, skb->len); 3061 3062 /* Stop the queue when no more space is available; the packet can need up 3063 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header 3064 * (if the header is larger than tx_max_header_size). 3065 */ 3066 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3067 tx_ring->sgl_size + 2))) { 3068 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", 3069 __func__, qid); 3070 3071 netif_tx_stop_queue(txq); 3072 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, 3073 &tx_ring->syncp); 3074 3075 /* There is a rare condition where this function decides to 3076 * stop the queue but meanwhile clean_tx_irq updates 3077 * next_to_completion and terminates. 3078 * The queue will remain stopped forever. 3079 * To solve this issue, add an mb() to make sure that the 3080 * netif_tx_stop_queue() write is visible before checking if 3081 * there is additional space in the queue. 3082 */ 3083 smp_mb(); 3084 3085 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3086 ENA_TX_WAKEUP_THRESH)) { 3087 netif_tx_wake_queue(txq); 3088 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, 3089 &tx_ring->syncp); 3090 } 3091 } 3092 3093 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) { 3094 /* trigger the dma engine.
ena_com_write_sq_doorbell() 3095 * has a mb 3096 */ 3097 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 3098 ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, 3099 &tx_ring->syncp); 3100 } 3101 3102 return NETDEV_TX_OK; 3103 3104 error_unmap_dma: 3105 ena_unmap_tx_buff(tx_ring, tx_info); 3106 tx_info->skb = NULL; 3107 3108 error_drop_packet: 3109 dev_kfree_skb(skb); 3110 return NETDEV_TX_OK; 3111 } 3112 3113 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, 3114 struct net_device *sb_dev) 3115 { 3116 u16 qid; 3117 /* we suspect that this is good for in--kernel network services that 3118 * want to loop incoming skb rx to tx in normal user generated traffic, 3119 * most probably we will not get to this 3120 */ 3121 if (skb_rx_queue_recorded(skb)) 3122 qid = skb_get_rx_queue(skb); 3123 else 3124 qid = netdev_pick_tx(dev, skb, NULL); 3125 3126 return qid; 3127 } 3128 3129 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) 3130 { 3131 struct device *dev = &pdev->dev; 3132 struct ena_admin_host_info *host_info; 3133 int rc; 3134 3135 /* Allocate only the host info */ 3136 rc = ena_com_allocate_host_info(ena_dev); 3137 if (rc) { 3138 dev_err(dev, "Cannot allocate host info\n"); 3139 return; 3140 } 3141 3142 host_info = ena_dev->host_attr.host_info; 3143 3144 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; 3145 host_info->os_type = ENA_ADMIN_OS_LINUX; 3146 host_info->kernel_ver = LINUX_VERSION_CODE; 3147 strlcpy(host_info->kernel_ver_str, utsname()->version, 3148 sizeof(host_info->kernel_ver_str) - 1); 3149 host_info->os_dist = 0; 3150 strncpy(host_info->os_dist_str, utsname()->release, 3151 sizeof(host_info->os_dist_str) - 1); 3152 host_info->driver_version = 3153 (DRV_MODULE_GEN_MAJOR) | 3154 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 3155 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | 3156 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); 3157 host_info->num_cpus = num_online_cpus(); 3158 3159 host_info->driver_supported_features = 3160 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 3161 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK | 3162 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK | 3163 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 3164 3165 rc = ena_com_set_host_attributes(ena_dev); 3166 if (rc) { 3167 if (rc == -EOPNOTSUPP) 3168 dev_warn(dev, "Cannot set host attributes\n"); 3169 else 3170 dev_err(dev, "Cannot set host attributes\n"); 3171 3172 goto err; 3173 } 3174 3175 return; 3176 3177 err: 3178 ena_com_delete_host_info(ena_dev); 3179 } 3180 3181 static void ena_config_debug_area(struct ena_adapter *adapter) 3182 { 3183 u32 debug_area_size; 3184 int rc, ss_count; 3185 3186 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); 3187 if (ss_count <= 0) { 3188 netif_err(adapter, drv, adapter->netdev, 3189 "SS count is negative\n"); 3190 return; 3191 } 3192 3193 /* allocate 32 bytes for each string and 64bit for the value */ 3194 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 3195 3196 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); 3197 if (rc) { 3198 netif_err(adapter, drv, adapter->netdev, 3199 "Cannot allocate debug area\n"); 3200 return; 3201 } 3202 3203 rc = ena_com_set_host_attributes(adapter->ena_dev); 3204 if (rc) { 3205 if (rc == -EOPNOTSUPP) 3206 netif_warn(adapter, drv, adapter->netdev, 3207 "Cannot set host attributes\n"); 3208 else 3209 netif_err(adapter, drv, adapter->netdev, 3210 "Cannot set host attributes\n"); 3211 
goto err; 3212 } 3213 3214 return; 3215 err: 3216 ena_com_delete_debug_area(adapter->ena_dev); 3217 } 3218 3219 int ena_update_hw_stats(struct ena_adapter *adapter) 3220 { 3221 int rc = 0; 3222 3223 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); 3224 if (rc) { 3225 dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n"); 3226 return rc; 3227 } 3228 3229 return 0; 3230 } 3231 3232 static void ena_get_stats64(struct net_device *netdev, 3233 struct rtnl_link_stats64 *stats) 3234 { 3235 struct ena_adapter *adapter = netdev_priv(netdev); 3236 struct ena_ring *rx_ring, *tx_ring; 3237 unsigned int start; 3238 u64 rx_drops; 3239 u64 tx_drops; 3240 int i; 3241 3242 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 3243 return; 3244 3245 for (i = 0; i < adapter->num_io_queues; i++) { 3246 u64 bytes, packets; 3247 3248 tx_ring = &adapter->tx_ring[i]; 3249 3250 do { 3251 start = u64_stats_fetch_begin_irq(&tx_ring->syncp); 3252 packets = tx_ring->tx_stats.cnt; 3253 bytes = tx_ring->tx_stats.bytes; 3254 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 3255 3256 stats->tx_packets += packets; 3257 stats->tx_bytes += bytes; 3258 3259 rx_ring = &adapter->rx_ring[i]; 3260 3261 do { 3262 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 3263 packets = rx_ring->rx_stats.cnt; 3264 bytes = rx_ring->rx_stats.bytes; 3265 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 3266 3267 stats->rx_packets += packets; 3268 stats->rx_bytes += bytes; 3269 } 3270 3271 do { 3272 start = u64_stats_fetch_begin_irq(&adapter->syncp); 3273 rx_drops = adapter->dev_stats.rx_drops; 3274 tx_drops = adapter->dev_stats.tx_drops; 3275 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); 3276 3277 stats->rx_dropped = rx_drops; 3278 stats->tx_dropped = tx_drops; 3279 3280 stats->multicast = 0; 3281 stats->collisions = 0; 3282 3283 stats->rx_length_errors = 0; 3284 stats->rx_crc_errors = 0; 3285 stats->rx_frame_errors = 0; 3286 stats->rx_fifo_errors = 0; 3287 stats->rx_missed_errors = 0; 3288 stats->tx_window_errors = 0; 3289 3290 stats->rx_errors = 0; 3291 stats->tx_errors = 0; 3292 } 3293 3294 static const struct net_device_ops ena_netdev_ops = { 3295 .ndo_open = ena_open, 3296 .ndo_stop = ena_close, 3297 .ndo_start_xmit = ena_start_xmit, 3298 .ndo_select_queue = ena_select_queue, 3299 .ndo_get_stats64 = ena_get_stats64, 3300 .ndo_tx_timeout = ena_tx_timeout, 3301 .ndo_change_mtu = ena_change_mtu, 3302 .ndo_set_mac_address = NULL, 3303 .ndo_validate_addr = eth_validate_addr, 3304 .ndo_bpf = ena_xdp, 3305 .ndo_xdp_xmit = ena_xdp_xmit, 3306 }; 3307 3308 static int ena_device_validate_params(struct ena_adapter *adapter, 3309 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3310 { 3311 struct net_device *netdev = adapter->netdev; 3312 int rc; 3313 3314 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, 3315 adapter->mac_addr); 3316 if (!rc) { 3317 netif_err(adapter, drv, netdev, 3318 "Error, mac address are different\n"); 3319 return -EINVAL; 3320 } 3321 3322 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { 3323 netif_err(adapter, drv, netdev, 3324 "Error, device max mtu is smaller than netdev MTU\n"); 3325 return -EINVAL; 3326 } 3327 3328 return 0; 3329 } 3330 3331 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config) 3332 { 3333 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 3334 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 3335 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 
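/* Default to 128-byte LLQ descriptor list entries */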
3336 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 3337 llq_config->llq_ring_entry_size_value = 128; 3338 } 3339 3340 static int ena_set_queues_placement_policy(struct pci_dev *pdev, 3341 struct ena_com_dev *ena_dev, 3342 struct ena_admin_feature_llq_desc *llq, 3343 struct ena_llq_configurations *llq_default_configurations) 3344 { 3345 int rc; 3346 u32 llq_feature_mask; 3347 3348 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 3349 if (!(ena_dev->supported_features & llq_feature_mask)) { 3350 dev_err(&pdev->dev, 3351 "LLQ is not supported Fallback to host mode policy.\n"); 3352 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3353 return 0; 3354 } 3355 3356 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 3357 if (unlikely(rc)) { 3358 dev_err(&pdev->dev, 3359 "Failed to configure the device mode. Fallback to host mode policy.\n"); 3360 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3361 } 3362 3363 return 0; 3364 } 3365 3366 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev, 3367 int bars) 3368 { 3369 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR)); 3370 3371 if (!has_mem_bar) { 3372 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 3373 dev_err(&pdev->dev, 3374 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n"); 3375 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3376 } 3377 3378 return 0; 3379 } 3380 3381 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, 3382 pci_resource_start(pdev, ENA_MEM_BAR), 3383 pci_resource_len(pdev, ENA_MEM_BAR)); 3384 3385 if (!ena_dev->mem_bar) 3386 return -EFAULT; 3387 3388 return 0; 3389 } 3390 3391 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, 3392 struct ena_com_dev_get_features_ctx *get_feat_ctx, 3393 bool *wd_state) 3394 { 3395 struct ena_llq_configurations llq_config; 3396 struct device *dev = &pdev->dev; 3397 bool readless_supported; 3398 u32 aenq_groups; 3399 int dma_width; 3400 int rc; 3401 3402 rc = ena_com_mmio_reg_read_request_init(ena_dev); 3403 if (rc) { 3404 dev_err(dev, "Failed to init mmio read less\n"); 3405 return rc; 3406 } 3407 3408 /* The PCIe configuration space revision id indicate if mmio reg 3409 * read is disabled 3410 */ 3411 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); 3412 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 3413 3414 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 3415 if (rc) { 3416 dev_err(dev, "Can not reset device\n"); 3417 goto err_mmio_read_less; 3418 } 3419 3420 rc = ena_com_validate_version(ena_dev); 3421 if (rc) { 3422 dev_err(dev, "Device version is too low\n"); 3423 goto err_mmio_read_less; 3424 } 3425 3426 dma_width = ena_com_get_dma_width(ena_dev); 3427 if (dma_width < 0) { 3428 dev_err(dev, "Invalid dma width value %d", dma_width); 3429 rc = dma_width; 3430 goto err_mmio_read_less; 3431 } 3432 3433 rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width)); 3434 if (rc) { 3435 dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc); 3436 goto err_mmio_read_less; 3437 } 3438 3439 /* ENA admin level init */ 3440 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 3441 if (rc) { 3442 dev_err(dev, 3443 "Can not initialize ena admin queue with device\n"); 3444 goto err_mmio_read_less; 3445 } 3446 3447 /* To enable the msix interrupts the driver needs to know the number 3448 * of queues. 
So the driver uses polling mode to retrieve this 3449 * information 3450 */ 3451 ena_com_set_admin_polling_mode(ena_dev, true); 3452 3453 ena_config_host_info(ena_dev, pdev); 3454 3455 /* Get Device Attributes*/ 3456 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 3457 if (rc) { 3458 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); 3459 goto err_admin_init; 3460 } 3461 3462 /* Try to turn all the available aenq groups */ 3463 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 3464 BIT(ENA_ADMIN_FATAL_ERROR) | 3465 BIT(ENA_ADMIN_WARNING) | 3466 BIT(ENA_ADMIN_NOTIFICATION) | 3467 BIT(ENA_ADMIN_KEEP_ALIVE); 3468 3469 aenq_groups &= get_feat_ctx->aenq.supported_groups; 3470 3471 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 3472 if (rc) { 3473 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); 3474 goto err_admin_init; 3475 } 3476 3477 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 3478 3479 set_default_llq_configurations(&llq_config); 3480 3481 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, 3482 &llq_config); 3483 if (rc) { 3484 dev_err(dev, "ENA device init failed\n"); 3485 goto err_admin_init; 3486 } 3487 3488 return 0; 3489 3490 err_admin_init: 3491 ena_com_delete_host_info(ena_dev); 3492 ena_com_admin_destroy(ena_dev); 3493 err_mmio_read_less: 3494 ena_com_mmio_reg_read_request_destroy(ena_dev); 3495 3496 return rc; 3497 } 3498 3499 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) 3500 { 3501 struct ena_com_dev *ena_dev = adapter->ena_dev; 3502 struct device *dev = &adapter->pdev->dev; 3503 int rc; 3504 3505 rc = ena_enable_msix(adapter); 3506 if (rc) { 3507 dev_err(dev, "Can not reserve msix vectors\n"); 3508 return rc; 3509 } 3510 3511 ena_setup_mgmnt_intr(adapter); 3512 3513 rc = ena_request_mgmnt_irq(adapter); 3514 if (rc) { 3515 dev_err(dev, "Can not setup management interrupts\n"); 3516 goto err_disable_msix; 3517 } 3518 3519 ena_com_set_admin_polling_mode(ena_dev, false); 3520 3521 ena_com_admin_aenq_enable(ena_dev); 3522 3523 return 0; 3524 3525 err_disable_msix: 3526 ena_disable_msix(adapter); 3527 3528 return rc; 3529 } 3530 3531 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) 3532 { 3533 struct net_device *netdev = adapter->netdev; 3534 struct ena_com_dev *ena_dev = adapter->ena_dev; 3535 bool dev_up; 3536 3537 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) 3538 return; 3539 3540 netif_carrier_off(netdev); 3541 3542 del_timer_sync(&adapter->timer_service); 3543 3544 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 3545 adapter->dev_up_before_reset = dev_up; 3546 if (!graceful) 3547 ena_com_set_admin_running_state(ena_dev, false); 3548 3549 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 3550 ena_down(adapter); 3551 3552 /* Stop the device from sending AENQ events (in case reset flag is set 3553 * and device is up, ena_down() already reset the device. 
3554 */ 3555 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 3556 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 3557 3558 ena_free_mgmnt_irq(adapter); 3559 3560 ena_disable_msix(adapter); 3561 3562 ena_com_abort_admin_commands(ena_dev); 3563 3564 ena_com_wait_for_abort_completion(ena_dev); 3565 3566 ena_com_admin_destroy(ena_dev); 3567 3568 ena_com_mmio_reg_read_request_destroy(ena_dev); 3569 3570 /* return reset reason to default value */ 3571 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 3572 3573 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3574 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 3575 } 3576 3577 static int ena_restore_device(struct ena_adapter *adapter) 3578 { 3579 struct ena_com_dev_get_features_ctx get_feat_ctx; 3580 struct ena_com_dev *ena_dev = adapter->ena_dev; 3581 struct pci_dev *pdev = adapter->pdev; 3582 bool wd_state; 3583 int rc; 3584 3585 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 3586 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); 3587 if (rc) { 3588 dev_err(&pdev->dev, "Can not initialize device\n"); 3589 goto err; 3590 } 3591 adapter->wd_state = wd_state; 3592 3593 rc = ena_device_validate_params(adapter, &get_feat_ctx); 3594 if (rc) { 3595 dev_err(&pdev->dev, "Validation of device parameters failed\n"); 3596 goto err_device_destroy; 3597 } 3598 3599 rc = ena_enable_msix_and_set_admin_interrupts(adapter); 3600 if (rc) { 3601 dev_err(&pdev->dev, "Enable MSI-X failed\n"); 3602 goto err_device_destroy; 3603 } 3604 /* If the interface was up before the reset bring it up */ 3605 if (adapter->dev_up_before_reset) { 3606 rc = ena_up(adapter); 3607 if (rc) { 3608 dev_err(&pdev->dev, "Failed to create I/O queues\n"); 3609 goto err_disable_msix; 3610 } 3611 } 3612 3613 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 3614 3615 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 3616 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) 3617 netif_carrier_on(adapter->netdev); 3618 3619 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 3620 adapter->last_keep_alive_jiffies = jiffies; 3621 3622 dev_err(&pdev->dev, "Device reset completed successfully\n"); 3623 3624 return rc; 3625 err_disable_msix: 3626 ena_free_mgmnt_irq(adapter); 3627 ena_disable_msix(adapter); 3628 err_device_destroy: 3629 ena_com_abort_admin_commands(ena_dev); 3630 ena_com_wait_for_abort_completion(ena_dev); 3631 ena_com_admin_destroy(ena_dev); 3632 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 3633 ena_com_mmio_reg_read_request_destroy(ena_dev); 3634 err: 3635 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 3636 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 3637 dev_err(&pdev->dev, 3638 "Reset attempt failed. 
Can not reset the device\n"); 3639 3640 return rc; 3641 } 3642 3643 static void ena_fw_reset_device(struct work_struct *work) 3644 { 3645 struct ena_adapter *adapter = 3646 container_of(work, struct ena_adapter, reset_task); 3647 3648 rtnl_lock(); 3649 3650 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { 3651 ena_destroy_device(adapter, false); 3652 ena_restore_device(adapter); 3653 } 3654 3655 rtnl_unlock(); 3656 } 3657 3658 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, 3659 struct ena_ring *rx_ring) 3660 { 3661 if (likely(rx_ring->first_interrupt)) 3662 return 0; 3663 3664 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 3665 return 0; 3666 3667 rx_ring->no_interrupt_event_cnt++; 3668 3669 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { 3670 netif_err(adapter, rx_err, adapter->netdev, 3671 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", 3672 rx_ring->qid); 3673 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 3674 smp_mb__before_atomic(); 3675 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3676 return -EIO; 3677 } 3678 3679 return 0; 3680 } 3681 3682 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 3683 struct ena_ring *tx_ring) 3684 { 3685 struct ena_tx_buffer *tx_buf; 3686 unsigned long last_jiffies; 3687 u32 missed_tx = 0; 3688 int i, rc = 0; 3689 3690 for (i = 0; i < tx_ring->ring_size; i++) { 3691 tx_buf = &tx_ring->tx_buffer_info[i]; 3692 last_jiffies = tx_buf->last_jiffies; 3693 3694 if (last_jiffies == 0) 3695 /* no pending Tx at this location */ 3696 continue; 3697 3698 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies + 3699 2 * adapter->missing_tx_completion_to))) { 3700 /* If after graceful period interrupt is still not 3701 * received, we schedule a reset 3702 */ 3703 netif_err(adapter, tx_err, adapter->netdev, 3704 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", 3705 tx_ring->qid); 3706 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 3707 smp_mb__before_atomic(); 3708 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3709 return -EIO; 3710 } 3711 3712 if (unlikely(time_is_before_jiffies(last_jiffies + 3713 adapter->missing_tx_completion_to))) { 3714 if (!tx_buf->print_once) 3715 netif_notice(adapter, tx_err, adapter->netdev, 3716 "Found a Tx that wasn't completed on time, qid %d, index %d.\n", 3717 tx_ring->qid, i); 3718 3719 tx_buf->print_once = 1; 3720 missed_tx++; 3721 } 3722 } 3723 3724 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { 3725 netif_err(adapter, tx_err, adapter->netdev, 3726 "The number of lost tx completions is above the threshold (%d > %d). 
Reset the device\n", 3727 missed_tx, 3728 adapter->missing_tx_completion_threshold); 3729 adapter->reset_reason = 3730 ENA_REGS_RESET_MISS_TX_CMPL; 3731 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3732 rc = -EIO; 3733 } 3734 3735 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, 3736 &tx_ring->syncp); 3737 3738 return rc; 3739 } 3740 3741 static void check_for_missing_completions(struct ena_adapter *adapter) 3742 { 3743 struct ena_ring *tx_ring; 3744 struct ena_ring *rx_ring; 3745 int i, budget, rc; 3746 int io_queue_count; 3747 3748 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; 3749 /* Make sure the driver doesn't turn the device off in another process */ 3750 smp_rmb(); 3751 3752 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 3753 return; 3754 3755 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 3756 return; 3757 3758 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 3759 return; 3760 3761 budget = ENA_MONITORED_TX_QUEUES; 3762 3763 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) { 3764 tx_ring = &adapter->tx_ring[i]; 3765 rx_ring = &adapter->rx_ring[i]; 3766 3767 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 3768 if (unlikely(rc)) 3769 return; 3770 3771 rc = !ENA_IS_XDP_INDEX(adapter, i) ? 3772 check_for_rx_interrupt_queue(adapter, rx_ring) : 0; 3773 if (unlikely(rc)) 3774 return; 3775 3776 budget--; 3777 if (!budget) 3778 break; 3779 } 3780 3781 adapter->last_monitored_tx_qid = i % io_queue_count; 3782 } 3783 3784 /* trigger napi schedule after 2 consecutive detections */ 3785 #define EMPTY_RX_REFILL 2 3786 /* For the rare case where the device runs out of Rx descriptors and the 3787 * napi handler failed to refill new Rx descriptors (due to a lack of memory, 3788 * for example). 3789 * This case will lead to a deadlock: 3790 * The device won't send interrupts since all the new Rx packets will be dropped. 3791 * The napi handler won't allocate new Rx descriptors, so the device won't be 3792 * able to receive new packets. 3793 * 3794 * This scenario can happen when the kernel's vm.min_free_kbytes is too small. 3795 * It is recommended to have at least 512MB, with a minimum of 128MB for 3796 * constrained environments.
3797 * 3798 * When such a situation is detected - Reschedule napi 3799 */ 3800 static void check_for_empty_rx_ring(struct ena_adapter *adapter) 3801 { 3802 struct ena_ring *rx_ring; 3803 int i, refill_required; 3804 3805 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 3806 return; 3807 3808 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 3809 return; 3810 3811 for (i = 0; i < adapter->num_io_queues; i++) { 3812 rx_ring = &adapter->rx_ring[i]; 3813 3814 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 3815 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3816 rx_ring->empty_rx_queue++; 3817 3818 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 3819 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, 3820 &rx_ring->syncp); 3821 3822 netif_err(adapter, drv, adapter->netdev, 3823 "Trigger refill for ring %d\n", i); 3824 3825 napi_schedule(rx_ring->napi); 3826 rx_ring->empty_rx_queue = 0; 3827 } 3828 } else { 3829 rx_ring->empty_rx_queue = 0; 3830 } 3831 } 3832 } 3833 3834 /* Check for keep alive expiration */ 3835 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 3836 { 3837 unsigned long keep_alive_expired; 3838 3839 if (!adapter->wd_state) 3840 return; 3841 3842 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 3843 return; 3844 3845 keep_alive_expired = adapter->last_keep_alive_jiffies + 3846 adapter->keep_alive_timeout; 3847 if (unlikely(time_is_before_jiffies(keep_alive_expired))) { 3848 netif_err(adapter, drv, adapter->netdev, 3849 "Keep alive watchdog timeout.\n"); 3850 ena_increase_stat(&adapter->dev_stats.wd_expired, 1, 3851 &adapter->syncp); 3852 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 3853 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3854 } 3855 } 3856 3857 static void check_for_admin_com_state(struct ena_adapter *adapter) 3858 { 3859 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { 3860 netif_err(adapter, drv, adapter->netdev, 3861 "ENA admin queue is not in running state!\n"); 3862 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, 3863 &adapter->syncp); 3864 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 3865 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3866 } 3867 } 3868 3869 static void ena_update_hints(struct ena_adapter *adapter, 3870 struct ena_admin_ena_hw_hints *hints) 3871 { 3872 struct net_device *netdev = adapter->netdev; 3873 3874 if (hints->admin_completion_tx_timeout) 3875 adapter->ena_dev->admin_queue.completion_timeout = 3876 hints->admin_completion_tx_timeout * 1000; 3877 3878 if (hints->mmio_read_timeout) 3879 /* convert to usec */ 3880 adapter->ena_dev->mmio_read.reg_read_to = 3881 hints->mmio_read_timeout * 1000; 3882 3883 if (hints->missed_tx_completion_count_threshold_to_reset) 3884 adapter->missing_tx_completion_threshold = 3885 hints->missed_tx_completion_count_threshold_to_reset; 3886 3887 if (hints->missing_tx_completion_timeout) { 3888 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) 3889 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; 3890 else 3891 adapter->missing_tx_completion_to = 3892 msecs_to_jiffies(hints->missing_tx_completion_timeout); 3893 } 3894 3895 if (hints->netdev_wd_timeout) 3896 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); 3897 3898 if (hints->driver_watchdog_timeout) { 3899 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 3900 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 3901 else 3902 adapter->keep_alive_timeout = 3903 
static void ena_update_host_info(struct ena_admin_host_info *host_info,
                                 struct net_device *netdev)
{
    host_info->supported_network_features[0] =
        netdev->features & GENMASK_ULL(31, 0);
    host_info->supported_network_features[1] =
        (netdev->features & GENMASK_ULL(63, 32)) >> 32;
}

static void ena_timer_service(struct timer_list *t)
{
    struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
    u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
    struct ena_admin_host_info *host_info =
        adapter->ena_dev->host_attr.host_info;

    check_for_missing_keep_alive(adapter);

    check_for_admin_com_state(adapter);

    check_for_missing_completions(adapter);

    check_for_empty_rx_ring(adapter);

    if (debug_area)
        ena_dump_stats_to_buf(adapter, debug_area);

    if (host_info)
        ena_update_host_info(host_info, adapter->netdev);

    if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
        netif_err(adapter, drv, adapter->netdev,
                  "Trigger reset is on\n");
        ena_dump_stats_to_dmesg(adapter);
        queue_work(ena_wq, &adapter->reset_task);
        return;
    }

    /* Reset the timer */
    mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}

static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
                                     struct ena_com_dev *ena_dev,
                                     struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
    u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

    if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
        struct ena_admin_queue_ext_feature_fields *max_queue_ext =
            &get_feat_ctx->max_queue_ext.max_queue_ext;
        io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
                          max_queue_ext->max_rx_cq_num);

        io_tx_sq_num = max_queue_ext->max_tx_sq_num;
        io_tx_cq_num = max_queue_ext->max_tx_cq_num;
    } else {
        struct ena_admin_queue_feature_desc *max_queues =
            &get_feat_ctx->max_queues;
        io_tx_sq_num = max_queues->max_sq_num;
        io_tx_cq_num = max_queues->max_cq_num;
        io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
    }

    /* In case of LLQ use the llq fields for the tx SQ/CQ */
    if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
        io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

    max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
    max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
    max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
    max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
    /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
    max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
    if (unlikely(!max_num_io_queues)) {
        dev_err(&pdev->dev, "The device doesn't have io queues\n");
        return -EFAULT;
    }

    return max_num_io_queues;
}

static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
                                 struct net_device *netdev)
{
    netdev_features_t dev_features = 0;

    /* Set offload features */
    if (feat->offload.tx &
        ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
        dev_features |= NETIF_F_IP_CSUM;

    if (feat->offload.tx &
        ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
        dev_features |= NETIF_F_IPV6_CSUM;

    if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
        dev_features |= NETIF_F_TSO;

    if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
        dev_features |= NETIF_F_TSO6;

    if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
        dev_features |= NETIF_F_TSO_ECN;

    if (feat->offload.rx_supported &
        ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
        dev_features |= NETIF_F_RXCSUM;

    if (feat->offload.rx_supported &
        ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
        dev_features |= NETIF_F_RXCSUM;

    netdev->features =
        dev_features |
        NETIF_F_SG |
        NETIF_F_RXHASH |
        NETIF_F_HIGHDMA;

    netdev->hw_features |= netdev->features;
    netdev->vlan_features |= netdev->features;
}

static void ena_set_conf_feat_params(struct ena_adapter *adapter,
                                     struct ena_com_dev_get_features_ctx *feat)
{
    struct net_device *netdev = adapter->netdev;

    /* Copy mac address */
    if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
        eth_hw_addr_random(netdev);
        ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
    } else {
        ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
        ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
    }

    /* Set offload features */
    ena_set_dev_offloads(feat, netdev);

    adapter->max_mtu = feat->dev_attr.max_mtu;
    netdev->max_mtu = adapter->max_mtu;
    netdev->min_mtu = ENA_MIN_MTU;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
    struct ena_com_dev *ena_dev = adapter->ena_dev;
    struct device *dev = &adapter->pdev->dev;
    int rc, i;
    u32 val;

    rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
    if (unlikely(rc)) {
        dev_err(dev, "Cannot init indirect table\n");
        goto err_rss_init;
    }

    for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
        val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
        rc = ena_com_indirect_table_fill_entry(ena_dev, i,
                                               ENA_IO_RXQ_IDX(val));
        if (unlikely(rc && (rc != -EOPNOTSUPP))) {
            dev_err(dev, "Cannot fill indirect table\n");
            goto err_fill_indir;
        }
    }

    rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
                                    ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
    if (unlikely(rc && (rc != -EOPNOTSUPP))) {
        dev_err(dev, "Cannot fill hash function\n");
        goto err_fill_indir;
    }

    rc = ena_com_set_default_hash_ctrl(ena_dev);
    if (unlikely(rc && (rc != -EOPNOTSUPP))) {
        dev_err(dev, "Cannot fill hash control\n");
        goto err_fill_indir;
    }

    return 0;

err_fill_indir:
    ena_com_rss_destroy(ena_dev);
err_rss_init:

    return rc;
}

static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
    int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;

    pci_release_selected_regions(pdev, release_bars);
}

static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
    struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
    struct ena_com_dev *ena_dev = ctx->ena_dev;
    u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
    u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
    u32 max_tx_queue_size;
    u32 max_rx_queue_size;

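    /* Queue depth limits come either from the extended max-queues feature
     * (newer devices) or from the legacy max_queues descriptor, mirroring
     * the capability check in ena_calc_max_io_queue_num() above.
     */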
    if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
        struct ena_admin_queue_ext_feature_fields *max_queue_ext =
            &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
        max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
                                  max_queue_ext->max_rx_sq_depth);
        max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
            max_tx_queue_size = min_t(u32, max_tx_queue_size,
                                      llq->max_llq_depth);
        else
            max_tx_queue_size = min_t(u32, max_tx_queue_size,
                                      max_queue_ext->max_tx_sq_depth);

        ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
                                     max_queue_ext->max_per_packet_tx_descs);
        ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
                                     max_queue_ext->max_per_packet_rx_descs);
    } else {
        struct ena_admin_queue_feature_desc *max_queues =
            &ctx->get_feat_ctx->max_queues;
        max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
                                  max_queues->max_sq_depth);
        max_tx_queue_size = max_queues->max_cq_depth;

        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
            max_tx_queue_size = min_t(u32, max_tx_queue_size,
                                      llq->max_llq_depth);
        else
            max_tx_queue_size = min_t(u32, max_tx_queue_size,
                                      max_queues->max_sq_depth);

        ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
                                     max_queues->max_packet_tx_descs);
        ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
                                     max_queues->max_packet_rx_descs);
    }

    max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
    max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);

    tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
                              max_tx_queue_size);
    rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
                              max_rx_queue_size);

    tx_queue_size = rounddown_pow_of_two(tx_queue_size);
    rx_queue_size = rounddown_pow_of_two(rx_queue_size);

    ctx->max_tx_queue_size = max_tx_queue_size;
    ctx->max_rx_queue_size = max_rx_queue_size;
    ctx->tx_queue_size = tx_queue_size;
    ctx->rx_queue_size = rx_queue_size;

    return 0;
}

/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
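 * (Roughly: PCI and DMA setup, admin queue initialization, feature query,
 * queue sizing, netdev registration, and finally arming the timer service.)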
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct ena_calc_queue_size_ctx calc_queue_ctx = {};
    struct ena_com_dev_get_features_ctx get_feat_ctx;
    struct ena_com_dev *ena_dev = NULL;
    struct ena_adapter *adapter;
    struct net_device *netdev;
    static int adapters_found;
    u32 max_num_io_queues;
    bool wd_state;
    int bars, rc;

    dev_dbg(&pdev->dev, "%s\n", __func__);

    rc = pci_enable_device_mem(pdev);
    if (rc) {
        dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
        return rc;
    }

    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
    if (rc) {
        dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
        goto err_disable_device;
    }

    pci_set_master(pdev);

    ena_dev = vzalloc(sizeof(*ena_dev));
    if (!ena_dev) {
        rc = -ENOMEM;
        goto err_disable_device;
    }

    bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
    rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
    if (rc) {
        dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
                rc);
        goto err_free_ena_dev;
    }

    ena_dev->reg_bar = devm_ioremap(&pdev->dev,
                                    pci_resource_start(pdev, ENA_REG_BAR),
                                    pci_resource_len(pdev, ENA_REG_BAR));
    if (!ena_dev->reg_bar) {
        dev_err(&pdev->dev, "Failed to remap regs bar\n");
        rc = -EFAULT;
        goto err_free_region;
    }

    ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;

    ena_dev->dmadev = &pdev->dev;

    netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
    if (!netdev) {
        dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
        rc = -ENOMEM;
        goto err_free_region;
    }

    SET_NETDEV_DEV(netdev, &pdev->dev);
    adapter = netdev_priv(netdev);
    adapter->ena_dev = ena_dev;
    adapter->netdev = netdev;
    adapter->pdev = pdev;
    adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

    ena_dev->net_device = netdev;

    pci_set_drvdata(pdev, adapter);

    rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
    if (rc) {
        dev_err(&pdev->dev, "ENA device init failed\n");
        if (rc == -ETIME)
            rc = -EPROBE_DEFER;
        goto err_netdev_destroy;
    }

    rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
    if (rc) {
        dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
        goto err_device_destroy;
    }

    calc_queue_ctx.ena_dev = ena_dev;
    calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
    calc_queue_ctx.pdev = pdev;

    /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
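     * (i.e. the ENA_INTR_INITIAL_*_INTERVAL_USECS defaults below are plain
     * microsecond values at this point.)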
     * Updated during device initialization with the real granularity
     */
    ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
    ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
    ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
    max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
    rc = ena_calc_io_queue_size(&calc_queue_ctx);
    if (rc || !max_num_io_queues) {
        rc = -EFAULT;
        goto err_device_destroy;
    }

    ena_set_conf_feat_params(adapter, &get_feat_ctx);

    adapter->reset_reason = ENA_REGS_RESET_NORMAL;

    adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
    adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
    adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
    adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
    adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
    adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;

    adapter->num_io_queues = max_num_io_queues;
    adapter->max_num_io_queues = max_num_io_queues;
    adapter->last_monitored_tx_qid = 0;

    adapter->xdp_first_ring = 0;
    adapter->xdp_num_queues = 0;

    adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
    if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
        adapter->disable_meta_caching =
            !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
               BIT(ENA_ADMIN_DISABLE_META_CACHING));

    adapter->wd_state = wd_state;

    snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

    rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
    if (rc) {
        dev_err(&pdev->dev,
                "Failed to query interrupt moderation feature\n");
        goto err_device_destroy;
    }
    ena_init_io_rings(adapter,
                      0,
                      adapter->xdp_num_queues +
                      adapter->num_io_queues);

    netdev->netdev_ops = &ena_netdev_ops;
    netdev->watchdog_timeo = TX_TIMEOUT;
    ena_set_ethtool_ops(netdev);

    netdev->priv_flags |= IFF_UNICAST_FLT;

    u64_stats_init(&adapter->syncp);

    rc = ena_enable_msix_and_set_admin_interrupts(adapter);
    if (rc) {
        dev_err(&pdev->dev,
                "Failed to enable and set the admin interrupts\n");
        goto err_worker_destroy;
    }
    rc = ena_rss_init_default(adapter);
    if (rc && (rc != -EOPNOTSUPP)) {
        dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
        goto err_free_msix;
    }

    ena_config_debug_area(adapter);

    if (!ena_update_hw_stats(adapter))
        adapter->eni_stats_supported = true;
    else
        adapter->eni_stats_supported = false;

    memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

    netif_carrier_off(netdev);

    rc = register_netdev(netdev);
    if (rc) {
        dev_err(&pdev->dev, "Cannot register net device\n");
        goto err_rss;
    }

    INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

    adapter->last_keep_alive_jiffies = jiffies;
    adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
    adapter->missing_tx_completion_to = TX_TIMEOUT;
    adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

    ena_update_hints(adapter, &get_feat_ctx.hw_hints);

    timer_setup(&adapter->timer_service, ena_timer_service, 0);
    mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

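    /* From here on ena_timer_service() re-arms itself every second and
     * drives the keep-alive, admin-queue and missing-completion checks.
     */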
    dev_info(&pdev->dev,
             "%s found at mem %lx, mac addr %pM\n",
             DEVICE_NAME, (long)pci_resource_start(pdev, 0),
             netdev->dev_addr);

    set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

    adapters_found++;

    return 0;

err_rss:
    ena_com_delete_debug_area(ena_dev);
    ena_com_rss_destroy(ena_dev);
err_free_msix:
    ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
    /* stop submitting admin commands on a device that was reset */
    ena_com_set_admin_running_state(ena_dev, false);
    ena_free_mgmnt_irq(adapter);
    ena_disable_msix(adapter);
err_worker_destroy:
    del_timer(&adapter->timer_service);
err_device_destroy:
    ena_com_delete_host_info(ena_dev);
    ena_com_admin_destroy(ena_dev);
err_netdev_destroy:
    free_netdev(netdev);
err_free_region:
    ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
    vfree(ena_dev);
err_disable_device:
    pci_disable_device(pdev);
    return rc;
}

/*****************************************************************************/

/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
 * @pdev: PCI device information struct
 * @shutdown: Is it a shutdown operation? If false, it is a removal
 *
 * __ena_shutoff is a helper routine that does the real work on the shutdown
 * and removal paths; the difference between those paths is whether the
 * netdevice is detached or unregistered.
 */
static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
{
    struct ena_adapter *adapter = pci_get_drvdata(pdev);
    struct ena_com_dev *ena_dev;
    struct net_device *netdev;

    ena_dev = adapter->ena_dev;
    netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
    if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
        free_irq_cpu_rmap(netdev->rx_cpu_rmap);
        netdev->rx_cpu_rmap = NULL;
    }
#endif /* CONFIG_RFS_ACCEL */

    /* Make sure timer and reset routine won't be called after
     * freeing device resources.
     */
    del_timer_sync(&adapter->timer_service);
    cancel_work_sync(&adapter->reset_task);

    rtnl_lock(); /* lock released inside the below if-else block */
    adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
    ena_destroy_device(adapter, true);
    if (shutdown) {
        netif_device_detach(netdev);
        dev_close(netdev);
        rtnl_unlock();
    } else {
        rtnl_unlock();
        unregister_netdev(netdev);
        free_netdev(netdev);
    }

    ena_com_rss_destroy(ena_dev);

    ena_com_delete_debug_area(ena_dev);

    ena_com_delete_host_info(ena_dev);

    ena_release_bars(ena_dev, pdev);

    pci_disable_device(pdev);

    vfree(ena_dev);
}

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */

static void ena_remove(struct pci_dev *pdev)
{
    __ena_shutoff(pdev, false);
}

/* ena_shutdown - Device Shutdown Routine
 * @pdev: PCI device information struct
 *
 * ena_shutdown is called by the PCI subsystem to alert the driver that
 * a shutdown/reboot (or kexec) is happening and device must be disabled.
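 * Unlike ena_remove(), the netdev is only detached and closed here, not
 * unregistered (see __ena_shutoff()).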
 */

static void ena_shutdown(struct pci_dev *pdev)
{
    __ena_shutoff(pdev, true);
}

/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
    struct pci_dev *pdev = to_pci_dev(dev_d);
    struct ena_adapter *adapter = pci_get_drvdata(pdev);

    ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);

    rtnl_lock();
    if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
        dev_err(&pdev->dev,
                "Ignoring device reset request as the device is being suspended\n");
        clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
    }
    ena_destroy_device(adapter, true);
    rtnl_unlock();
    return 0;
}

/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
    struct ena_adapter *adapter = dev_get_drvdata(dev_d);
    int rc;

    ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);

    rtnl_lock();
    rc = ena_restore_device(adapter);
    rtnl_unlock();
    return rc;
}

static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);

static struct pci_driver ena_pci_driver = {
    .name = DRV_MODULE_NAME,
    .id_table = ena_pci_tbl,
    .probe = ena_probe,
    .remove = ena_remove,
    .shutdown = ena_shutdown,
    .driver.pm = &ena_pm_ops,
    .sriov_configure = pci_sriov_configure_simple,
};

static int __init ena_init(void)
{
    ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
    if (!ena_wq) {
        pr_err("Failed to create workqueue\n");
        return -ENOMEM;
    }

    return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
    pci_unregister_driver(&ena_pci_driver);

    if (ena_wq) {
        destroy_workqueue(ena_wq);
        ena_wq = NULL;
    }
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
                                      struct ena_admin_aenq_entry *aenq_e)
{
    struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
    struct ena_admin_aenq_link_change_desc *aenq_desc =
        (struct ena_admin_aenq_link_change_desc *)aenq_e;
    int status = aenq_desc->flags &
        ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

    if (status) {
        netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
        set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
        if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
            netif_carrier_on(adapter->netdev);
    } else {
        clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
        netif_carrier_off(adapter->netdev);
    }
}

static void ena_keep_alive_wd(void *adapter_data,
                              struct ena_admin_aenq_entry *aenq_e)
{
    struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
    struct ena_admin_aenq_keep_alive_desc *desc;
    u64 rx_drops;
    u64 tx_drops;

    desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
    adapter->last_keep_alive_jiffies = jiffies;

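    /* Drop counters are reported as two 32-bit halves in the keep-alive
     * descriptor; recombine them into 64-bit values.
     */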
    rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
    tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

    u64_stats_update_begin(&adapter->syncp);
    /* These stats are accumulated by the device, so the counters indicate
     * all drops since last reset.
     */
    adapter->dev_stats.rx_drops = rx_drops;
    adapter->dev_stats.tx_drops = tx_drops;
    u64_stats_update_end(&adapter->syncp);
}

static void ena_notification(void *adapter_data,
                             struct ena_admin_aenq_entry *aenq_e)
{
    struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
    struct ena_admin_ena_hw_hints *hints;

    WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
         "Invalid group(%x) expected %x\n",
         aenq_e->aenq_common_desc.group,
         ENA_ADMIN_NOTIFICATION);

    switch (aenq_e->aenq_common_desc.syndrome) {
    case ENA_ADMIN_UPDATE_HINTS:
        hints = (struct ena_admin_ena_hw_hints *)
            (&aenq_e->inline_data_w4);
        ena_update_hints(adapter, hints);
        break;
    default:
        netif_err(adapter, drv, adapter->netdev,
                  "Invalid aenq notification link state %d\n",
                  aenq_e->aenq_common_desc.syndrome);
    }
}

/* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
                                       struct ena_admin_aenq_entry *aenq_e)
{
    struct ena_adapter *adapter = (struct ena_adapter *)data;

    netif_err(adapter, drv, adapter->netdev,
              "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
    .handlers = {
        [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
        [ENA_ADMIN_NOTIFICATION] = ena_notification,
        [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
    },
    .unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);