/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_COM
#define ENA_COM

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/netdevice.h>

#include "ena_common_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
#include "ena_regs_defs.h"

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))

#define ENA_MAX_HANDLERS 256

#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48

/* Unit in usec */
#define ENA_REG_READ_TIMEOUT 200000

/* Byte sizes of the admin SQ/CQ/AENQ rings for a given entry count */
#define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
#define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))

/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */

#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1

#define ENA_HASH_KEY_SIZE 40

#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF

#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1

/* Driver-requested LLQ (low latency queue) configuration; negotiated
 * against the device capabilities (see ena_com_config_dev_mode()).
 */
struct ena_llq_configurations {
	enum ena_admin_llq_header_location llq_header_location;
	enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
	enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
	enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
	u16 llq_ring_entry_size_value;
};

enum queue_direction {
	ENA_COM_IO_QUEUE_DIRECTION_TX,
	ENA_COM_IO_QUEUE_DIRECTION_RX
};

/* Single DMA-able buffer descriptor */
struct ena_com_buf {
	dma_addr_t paddr; /**< Buffer physical address */
	u16 len; /**< Buffer length in bytes */
};

struct ena_com_rx_buf_info {
	u16 len;
	u16 req_id;
};

/* Addresses of a descriptor ring: device (LLQ), virtual and physical views */
struct ena_com_io_desc_addr {
	u8 __iomem *pbuf_dev_addr; /* LLQ address */
	u8 *virt_addr;
	dma_addr_t phys_addr;
};

/* Cached Tx offload metadata of the last packet (see cached_tx_meta below) */
struct ena_com_tx_meta {
	u16 mss;
	u16 l3_hdr_len;
	u16 l3_hdr_offset;
	u16 l4_hdr_len; /* In words */
};

/* LLQ parameters resolved from device features and driver configuration */
struct ena_com_llq_info {
	u16 header_location_ctrl;
	u16 desc_stride_ctrl;
	u16 desc_list_entry_size_ctrl;
	u16 desc_list_entry_size;
	u16 descs_num_before_header;
	u16 descs_per_entry;
	u16 max_entries_in_tx_burst;
	bool disable_meta_caching;
};

/* IO completion queue context */
struct ena_com_io_cq {
	struct ena_com_io_desc_addr cdesc_addr;

	/* Interrupt unmask register */
	u32 __iomem *unmask_reg;

	/* The completion queue head doorbell register */
	u32 __iomem *cq_head_db_reg;

	/* numa configuration register (for TPH) */
	u32 __iomem *numa_node_cfg_reg;

	/* The value to write to the above register to unmask
	 * the interrupt of this queue
	 */
	u32 msix_vector;

	enum queue_direction direction;

	/* holds the number of cdesc of the current packet */
	u16 cur_rx_pkt_cdesc_count;
	/* save the first cdesc idx of the current packet */
	u16 cur_rx_pkt_cdesc_start_idx;

	u16 q_depth;
	/* Caller qid */
	u16 qid;

	/* Device queue index */
	u16 idx;
	u16 head;
	u16 last_head_update;
	u8 phase;
	u8 cdesc_entry_size_in_bytes;

} ____cacheline_aligned;

/* Ring of bounce buffers used for copying LLQ entries before pushing
 * them to device memory.
 */
struct ena_com_io_bounce_buffer_control {
	u8 *base_buffer;
	u16 next_to_use;
	u16 buffer_size;
	u16 buffers_num; /* Must be a power of 2 */
};

/* This struct is to keep tracking the current location of the next llq entry */
struct ena_com_llq_pkt_ctrl {
	u8 *curr_bounce_buf;
	u16 idx;
	u16 descs_left_in_line;
};

/* IO submission queue context */
struct ena_com_io_sq {
	struct ena_com_io_desc_addr desc_addr;

	u32 __iomem *db_addr;
	u8 __iomem *header_addr;

	enum queue_direction direction;
	enum ena_admin_placement_policy_type mem_queue_type;

	bool disable_meta_caching;

	u32 msix_vector;
	struct ena_com_tx_meta cached_tx_meta;
	struct ena_com_llq_info llq_info;
	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;

	u16 q_depth;
	u16 qid;

	u16 idx;
	u16 tail;
	u16 next_to_comp;
	u16 llq_last_copy_tail;
	u32 tx_max_header_size;
	u8 phase;
	u8 desc_entry_size;
	u8 dma_addr_bits;
	u16 entries_in_tx_burst_left;
} ____cacheline_aligned;

/* Admin completion queue ring */
struct ena_com_admin_cq {
	struct ena_admin_acq_entry *entries;
	dma_addr_t dma_addr;

	u16 head;
	u8 phase;
};

/* Admin submission queue ring */
struct ena_com_admin_sq {
	struct ena_admin_aq_entry *entries;
	dma_addr_t dma_addr;

	u32 __iomem *db_addr;

	u16 head;
	u16 tail;
	u8 phase;

};

/* Admin queue statistics counters */
struct ena_com_stats_admin {
	u64 aborted_cmd;
	u64 submitted_cmd;
	u64 completed_cmd;
	u64 out_of_space;
	u64 no_completion;
};

struct ena_com_admin_queue {
	void *q_dmadev;
	struct ena_com_dev *ena_dev;
	spinlock_t q_lock; /* spinlock for the admin queue */

	struct ena_comp_ctx *comp_ctx;
	u32 completion_timeout;
	u16 q_depth;
	struct ena_com_admin_cq cq;
	struct ena_com_admin_sq sq;

	/* Indicate if the admin queue should poll for completion */
	bool polling;

	/* Define if fallback to polling mode should occur */
	bool auto_polling;

	u16 curr_cmd_id;

	/* Indicate that the ena was initialized and can
	 * process new admin commands
	 */
	bool running_state;

	/* Count the number of outstanding admin commands */
	atomic_t outstanding_cmds;

	struct ena_com_stats_admin stats;
};

struct ena_aenq_handlers;

/* Asynchronous event notification queue context */
struct ena_com_aenq {
	u16 head;
	u8 phase;
	struct ena_admin_aenq_entry *entries;
	dma_addr_t dma_addr;
	u16 q_depth;
	struct ena_aenq_handlers *aenq_handlers;
};

/* State of the (optionally readless) MMIO register read mechanism */
struct ena_com_mmio_read {
	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
	dma_addr_t read_resp_dma_addr;
	u32 reg_read_to; /* in us */
	u16 seq_num;
	bool readless_supported;
	/* spin lock to ensure a single outstanding read */
	spinlock_t lock;
};

/* RSS configuration: indirection table, hash key and hash control */
struct ena_rss {
	/* Indirect table */
	u16 *host_rss_ind_tbl;
	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
	dma_addr_t rss_ind_tbl_dma_addr;
	u16 tbl_log_size;

	/* Hash key */
	enum ena_admin_hash_functions hash_func;
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	dma_addr_t hash_key_dma_addr;
	u32 hash_init_val;

	/* Flow Control */
	struct ena_admin_feature_rss_hash_control *hash_ctrl;
	dma_addr_t hash_ctrl_dma_addr;

};

/* Host attributes shared with the device (debug area and host info) */
struct ena_host_attribute {
	/* Debug area */
	u8 *debug_area_virt_addr;
	dma_addr_t debug_area_dma_addr;
	u32 debug_area_size;

	/* Host information */
	struct ena_admin_host_info *host_info;
	dma_addr_t host_info_dma_addr;
};

/* Each ena_dev is a PCI function.
 */
struct ena_com_dev {
	struct ena_com_admin_queue admin_queue;
	struct ena_com_aenq aenq;
	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
	u8 __iomem *reg_bar;
	void __iomem *mem_bar;
	void *dmadev;

	enum ena_admin_placement_policy_type tx_mem_queue_type;
	u32 tx_max_header_size;
	u16 stats_func; /* Selected function for extended statistic dump */
	u16 stats_queue; /* Selected queue for extended statistic dump */

	struct ena_com_mmio_read mmio_read;

	struct ena_rss rss;
	u32 supported_features;
	u32 dma_addr_bits;

	struct ena_host_attribute host_attr;
	bool adaptive_coalescing;
	u16 intr_delay_resolution;

	/* interrupt moderation intervals are in usec divided by
	 * intr_delay_resolution, which is supplied by the device.
	 */
	u32 intr_moder_tx_interval;
	u32 intr_moder_rx_interval;

	struct ena_intr_moder_entry *intr_moder_tbl;

	struct ena_com_llq_info llq_info;

	u32 ena_min_poll_delay_us;
};

/* Device capabilities returned by ena_com_get_dev_attr_feat() */
struct ena_com_dev_get_features_ctx {
	struct ena_admin_queue_feature_desc max_queues;
	struct ena_admin_queue_ext_feature_desc max_queue_ext;
	struct ena_admin_device_attr_feature_desc dev_attr;
	struct ena_admin_feature_aenq_desc aenq;
	struct ena_admin_feature_offload_desc offload;
	struct ena_admin_ena_hw_hints hw_hints;
	struct ena_admin_feature_llq_desc llq;
};

/* Parameters for creating an IO queue pair (see ena_com_create_io_queue()) */
struct ena_com_create_io_ctx {
	enum ena_admin_placement_policy_type mem_queue_type;
	enum queue_direction direction;
	int numa_node;
	u32 msix_vector;
	u16 queue_size;
	u16 qid;
};

/* Callback invoked for each received AENQ event */
typedef void (*ena_aenq_handler)(void *data,
				 struct ena_admin_aenq_entry *aenq_e);

/* Holds aenq handlers.
 Indexed by AENQ event group */
struct ena_aenq_handlers {
	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
	/* Fallback called for event groups without a registered handler */
	ena_aenq_handler unimplemented_handler;
};

/*****************************************************************************/
/*****************************************************************************/

/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 *
 * Initialize the register read mechanism.
 *
 * @note: This method must be the first stage in the initialization sequence.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);

/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 * @readless_supported: readless mode (enable/disable)
 */
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
				bool readless_supported);

/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
 * value physical address.
 * @ena_dev: ENA communication layer struct
 */
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);

/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 */
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);

/* ena_com_admin_init - Init the admin and the async queues
 * @ena_dev: ENA communication layer struct
 * @aenq_handlers: Those handlers to be called upon event.
 *
 * Initialize the admin submission and completion queues.
 * Initialize the asynchronous events notification queues.
 *
 * @return - 0 on success, negative value on failure.
403 */ 404 int ena_com_admin_init(struct ena_com_dev *ena_dev, 405 struct ena_aenq_handlers *aenq_handlers); 406 407 /* ena_com_admin_destroy - Destroy the admin and the async events queues. 408 * @ena_dev: ENA communication layer struct 409 * 410 * @note: Before calling this method, the caller must validate that the device 411 * won't send any additional admin completions/aenq. 412 * To achieve that, a FLR is recommended. 413 */ 414 void ena_com_admin_destroy(struct ena_com_dev *ena_dev); 415 416 /* ena_com_dev_reset - Perform device FLR to the device. 417 * @ena_dev: ENA communication layer struct 418 * @reset_reason: Specify what is the trigger for the reset in case of an error. 419 * 420 * @return - 0 on success, negative value on failure. 421 */ 422 int ena_com_dev_reset(struct ena_com_dev *ena_dev, 423 enum ena_regs_reset_reason_types reset_reason); 424 425 /* ena_com_create_io_queue - Create io queue. 426 * @ena_dev: ENA communication layer struct 427 * @ctx - create context structure 428 * 429 * Create the submission and the completion queues. 430 * 431 * @return - 0 on success, negative value on failure. 432 */ 433 int ena_com_create_io_queue(struct ena_com_dev *ena_dev, 434 struct ena_com_create_io_ctx *ctx); 435 436 /* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid. 437 * @ena_dev: ENA communication layer struct 438 * @qid - the caller virtual queue id. 439 */ 440 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); 441 442 /* ena_com_get_io_handlers - Return the io queue handlers 443 * @ena_dev: ENA communication layer struct 444 * @qid - the caller virtual queue id. 445 * @io_sq - IO submission queue handler 446 * @io_cq - IO completion queue handler. 447 * 448 * @return - 0 on success, negative value on failure. 
449 */ 450 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 451 struct ena_com_io_sq **io_sq, 452 struct ena_com_io_cq **io_cq); 453 454 /* ena_com_admin_aenq_enable - ENAble asynchronous event notifications 455 * @ena_dev: ENA communication layer struct 456 * 457 * After this method, aenq event can be received via AENQ. 458 */ 459 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev); 460 461 /* ena_com_set_admin_running_state - Set the state of the admin queue 462 * @ena_dev: ENA communication layer struct 463 * 464 * Change the state of the admin queue (enable/disable) 465 */ 466 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state); 467 468 /* ena_com_get_admin_running_state - Get the admin queue state 469 * @ena_dev: ENA communication layer struct 470 * 471 * Retrieve the state of the admin queue (enable/disable) 472 * 473 * @return - current polling mode (enable/disable) 474 */ 475 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); 476 477 /* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode 478 * @ena_dev: ENA communication layer struct 479 * @polling: ENAble/Disable polling mode 480 * 481 * Set the admin completion mode. 482 */ 483 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); 484 485 /* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode 486 * @ena_dev: ENA communication layer struct 487 * @polling: Enable/Disable polling mode 488 * 489 * Set the autopolling mode. 490 * If autopolling is on: 491 * In case of missing interrupt when data is available switch to polling. 
492 */ 493 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, 494 bool polling); 495 496 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler 497 * @ena_dev: ENA communication layer struct 498 * 499 * This method goes over the admin completion queue and wakes up all the pending 500 * threads that wait on the commands wait event. 501 * 502 * @note: Should be called after MSI-X interrupt. 503 */ 504 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); 505 506 /* ena_com_aenq_intr_handler - AENQ interrupt handler 507 * @ena_dev: ENA communication layer struct 508 * 509 * This method goes over the async event notification queue and calls the proper 510 * aenq handler. 511 */ 512 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data); 513 514 /* ena_com_abort_admin_commands - Abort all the outstanding admin commands. 515 * @ena_dev: ENA communication layer struct 516 * 517 * This method aborts all the outstanding admin commands. 518 * The caller should then call ena_com_wait_for_abort_completion to make sure 519 * all the commands were completed. 520 */ 521 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); 522 523 /* ena_com_wait_for_abort_completion - Wait for admin commands abort. 524 * @ena_dev: ENA communication layer struct 525 * 526 * This method waits until all the outstanding admin commands are completed. 527 */ 528 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); 529 530 /* ena_com_validate_version - Validate the device parameters 531 * @ena_dev: ENA communication layer struct 532 * 533 * This method verifies the device parameters are the same as the saved 534 * parameters in ena_dev. 535 * This method is useful after device reset, to validate the device mac address 536 * and the device offloads are the same as before the reset. 537 * 538 * @return - 0 on success negative value otherwise. 
539 */ 540 int ena_com_validate_version(struct ena_com_dev *ena_dev); 541 542 /* ena_com_get_link_params - Retrieve physical link parameters. 543 * @ena_dev: ENA communication layer struct 544 * @resp: Link parameters 545 * 546 * Retrieve the physical link parameters, 547 * like speed, auto-negotiation and full duplex support. 548 * 549 * @return - 0 on Success negative value otherwise. 550 */ 551 int ena_com_get_link_params(struct ena_com_dev *ena_dev, 552 struct ena_admin_get_feat_resp *resp); 553 554 /* ena_com_get_dma_width - Retrieve physical dma address width the device 555 * supports. 556 * @ena_dev: ENA communication layer struct 557 * 558 * Retrieve the maximum physical address bits the device can handle. 559 * 560 * @return: > 0 on Success and negative value otherwise. 561 */ 562 int ena_com_get_dma_width(struct ena_com_dev *ena_dev); 563 564 /* ena_com_set_aenq_config - Set aenq groups configurations 565 * @ena_dev: ENA communication layer struct 566 * @groups flag: bit fields flags of enum ena_admin_aenq_group. 567 * 568 * Configure which aenq event group the driver would like to receive. 569 * 570 * @return: 0 on Success and negative value otherwise. 571 */ 572 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); 573 574 /* ena_com_get_dev_attr_feat - Get device features 575 * @ena_dev: ENA communication layer struct 576 * @get_feat_ctx: returned context that contain the get features. 577 * 578 * @return: 0 on Success and negative value otherwise. 579 */ 580 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, 581 struct ena_com_dev_get_features_ctx *get_feat_ctx); 582 583 /* ena_com_get_dev_basic_stats - Get device basic statistics 584 * @ena_dev: ENA communication layer struct 585 * @stats: stats return value 586 * 587 * @return: 0 on Success and negative value otherwise. 
 */
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats);

/* ena_com_get_eni_stats - Get extended network interface statistics
 * @ena_dev: ENA communication layer struct
 * @stats: stats return value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
			  struct ena_admin_eni_stats *stats);

/* ena_com_set_dev_mtu - Configure the device mtu.
 * @ena_dev: ENA communication layer struct
 * @mtu: mtu value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);

/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
 * @ena_dev: ENA communication layer struct
 * @offload: offload return value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload);

/* ena_com_rss_init - Init RSS
 * @ena_dev: ENA communication layer struct
 * @log_size: indirection log size
 *
 * Allocate RSS/RFS resources.
 * The caller then can configure rss using ena_com_set_hash_function,
 * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);

/* ena_com_rss_destroy - Destroy rss
 * @ena_dev: ENA communication layer struct
 *
 * Free all the RSS/RFS resources.
 */
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);

/* ena_com_get_current_hash_function - Get RSS hash function
 * @ena_dev: ENA communication layer struct
 *
 * Return the current hash function.
 * @return: 0 or one of the ena_admin_hash_functions values.
 */
int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);

/* ena_com_fill_hash_function - Fill RSS hash function
 * @ena_dev: ENA communication layer struct
 * @func: The hash function (Toeplitz or crc)
 * @key: Hash key (for toeplitz hash)
 * @key_len: key length (max length 10 DW)
 * @init_val: initial value for the hash function
 *
 * Fill the ena_dev resources with the desired hash function, hash key, key_len
 * and key initial value (if needed by the hash function).
 * To flush the key into the device the caller should call
 * ena_com_set_hash_function.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val);

/* ena_com_set_hash_function - Flush the hash function and its dependencies to
 * the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash function and its dependencies (key, key length and
 * initial value) if needed.
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_function
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);

/* ena_com_get_hash_function - Retrieve the hash function from the device.
 * @ena_dev: ENA communication layer struct
 * @func: hash function
 *
 * Retrieve the hash function from the device.
 *
 * @note: If the caller called ena_com_fill_hash_function but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
686 */ 687 int ena_com_get_hash_function(struct ena_com_dev *ena_dev, 688 enum ena_admin_hash_functions *func); 689 690 /* ena_com_get_hash_key - Retrieve the hash key 691 * @ena_dev: ENA communication layer struct 692 * @key: hash key 693 * 694 * Retrieve the hash key. 695 * 696 * @note: If the caller called ena_com_fill_hash_key but didn't flush 697 * it to the device, the new configuration will be lost. 698 * 699 * @return: 0 on Success and negative value otherwise. 700 */ 701 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key); 702 /* ena_com_fill_hash_ctrl - Fill RSS hash control 703 * @ena_dev: ENA communication layer struct. 704 * @proto: The protocol to configure. 705 * @hash_fields: bit mask of ena_admin_flow_hash_fields 706 * 707 * Fill the ena_dev resources with the desire hash control (the ethernet 708 * fields that take part of the hash) for a specific protocol. 709 * To flush the hash control to the device, the caller should call 710 * ena_com_set_hash_ctrl. 711 * 712 * @return: 0 on Success and negative value otherwise. 713 */ 714 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, 715 enum ena_admin_flow_hash_proto proto, 716 u16 hash_fields); 717 718 /* ena_com_set_hash_ctrl - Flush the hash control resources to the device. 719 * @ena_dev: ENA communication layer struct 720 * 721 * Flush the hash control (the ethernet fields that take part of the hash) 722 * 723 * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl. 724 * 725 * @return: 0 on Success and negative value otherwise. 726 */ 727 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); 728 729 /* ena_com_get_hash_ctrl - Retrieve the hash control from the device. 730 * @ena_dev: ENA communication layer struct 731 * @proto: The protocol to retrieve. 732 * @fields: bit mask of ena_admin_flow_hash_fields. 733 * 734 * Retrieve the hash control from the device. 
 *
 * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields);

/* ena_com_set_default_hash_ctrl - Set the hash control to a default
 * configuration.
 * @ena_dev: ENA communication layer struct
 *
 * Fill the ena_dev resources with the default hash control configuration.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);

/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
 * indirection table
 * @ena_dev: ENA communication layer struct.
 * @entry_idx - indirection table entry.
 * @entry_value - redirection value
 *
 * Fill a single entry of the RSS indirection table in the ena_dev resources.
 * To flush the indirection table to the device, the caller should call
 * ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value);

/* ena_com_indirect_table_set - Flush the indirection table to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the indirection hash control to the device.
 * Prior to this method the caller should call ena_com_indirect_table_fill_entry
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);

/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
783 * @ena_dev: ENA communication layer struct 784 * @ind_tbl: indirection table 785 * 786 * Retrieve the RSS indirection table from the device. 787 * 788 * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush 789 * it to the device, the new configuration will be lost. 790 * 791 * @return: 0 on Success and negative value otherwise. 792 */ 793 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl); 794 795 /* ena_com_allocate_host_info - Allocate host info resources. 796 * @ena_dev: ENA communication layer struct 797 * 798 * @return: 0 on Success and negative value otherwise. 799 */ 800 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev); 801 802 /* ena_com_allocate_debug_area - Allocate debug area. 803 * @ena_dev: ENA communication layer struct 804 * @debug_area_size - debug area size. 805 * 806 * @return: 0 on Success and negative value otherwise. 807 */ 808 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, 809 u32 debug_area_size); 810 811 /* ena_com_delete_debug_area - Free the debug area resources. 812 * @ena_dev: ENA communication layer struct 813 * 814 * Free the allocated debug area. 815 */ 816 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); 817 818 /* ena_com_delete_host_info - Free the host info resources. 819 * @ena_dev: ENA communication layer struct 820 * 821 * Free the allocated host info. 822 */ 823 void ena_com_delete_host_info(struct ena_com_dev *ena_dev); 824 825 /* ena_com_set_host_attributes - Update the device with the host 826 * attributes (debug area and host info) base address. 827 * @ena_dev: ENA communication layer struct 828 * 829 * @return: 0 on Success and negative value otherwise. 830 */ 831 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev); 832 833 /* ena_com_create_io_cq - Create io completion queue. 834 * @ena_dev: ENA communication layer struct 835 * @io_cq - io completion queue handler 836 837 * Create IO completion queue. 
838 * 839 * @return - 0 on success, negative value on failure. 840 */ 841 int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 842 struct ena_com_io_cq *io_cq); 843 844 /* ena_com_destroy_io_cq - Destroy io completion queue. 845 * @ena_dev: ENA communication layer struct 846 * @io_cq - io completion queue handler 847 848 * Destroy IO completion queue. 849 * 850 * @return - 0 on success, negative value on failure. 851 */ 852 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 853 struct ena_com_io_cq *io_cq); 854 855 /* ena_com_execute_admin_command - Execute admin command 856 * @admin_queue: admin queue. 857 * @cmd: the admin command to execute. 858 * @cmd_size: the command size. 859 * @cmd_completion: command completion return value. 860 * @cmd_comp_size: command completion size. 861 862 * Submit an admin command and then wait until the device returns a 863 * completion. 864 * The completion will be copied into cmd_comp. 865 * 866 * @return - 0 on success, negative value on failure. 867 */ 868 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 869 struct ena_admin_aq_entry *cmd, 870 size_t cmd_size, 871 struct ena_admin_acq_entry *cmd_comp, 872 size_t cmd_comp_size); 873 874 /* ena_com_init_interrupt_moderation - Init interrupt moderation 875 * @ena_dev: ENA communication layer struct 876 * 877 * @return - 0 on success, negative value on failure. 878 */ 879 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev); 880 881 /* ena_com_interrupt_moderation_supported - Return if interrupt moderation 882 * capability is supported by the device. 883 * 884 * @return - supported or not. 885 */ 886 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); 887 888 /* ena_com_update_nonadaptive_moderation_interval_tx - Update the 889 * non-adaptive interval in Tx direction. 890 * @ena_dev: ENA communication layer struct 891 * @tx_coalesce_usecs: Interval in usec. 
892 * 893 * @return - 0 on success, negative value on failure. 894 */ 895 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, 896 u32 tx_coalesce_usecs); 897 898 /* ena_com_update_nonadaptive_moderation_interval_rx - Update the 899 * non-adaptive interval in Rx direction. 900 * @ena_dev: ENA communication layer struct 901 * @rx_coalesce_usecs: Interval in usec. 902 * 903 * @return - 0 on success, negative value on failure. 904 */ 905 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, 906 u32 rx_coalesce_usecs); 907 908 /* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the 909 * non-adaptive interval in Tx direction. 910 * @ena_dev: ENA communication layer struct 911 * 912 * @return - interval in usec 913 */ 914 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); 915 916 /* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the 917 * non-adaptive interval in Rx direction. 918 * @ena_dev: ENA communication layer struct 919 * 920 * @return - interval in usec 921 */ 922 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); 923 924 /* ena_com_config_dev_mode - Configure the placement policy of the device. 925 * @ena_dev: ENA communication layer struct 926 * @llq_features: LLQ feature descriptor, retrieve via 927 * ena_com_get_dev_attr_feat. 
928 * @ena_llq_config: The default driver LLQ parameters configurations 929 */ 930 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, 931 struct ena_admin_feature_llq_desc *llq_features, 932 struct ena_llq_configurations *llq_default_config); 933 934 static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) 935 { 936 return ena_dev->adaptive_coalescing; 937 } 938 939 static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev) 940 { 941 ena_dev->adaptive_coalescing = true; 942 } 943 944 static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) 945 { 946 ena_dev->adaptive_coalescing = false; 947 } 948 949 /* ena_com_update_intr_reg - Prepare interrupt register 950 * @intr_reg: interrupt register to update. 951 * @rx_delay_interval: Rx interval in usecs 952 * @tx_delay_interval: Tx interval in usecs 953 * @unmask: unmask enable/disable 954 * 955 * Prepare interrupt update register with the supplied parameters. 
956 */ 957 static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, 958 u32 rx_delay_interval, 959 u32 tx_delay_interval, 960 bool unmask) 961 { 962 intr_reg->intr_control = 0; 963 intr_reg->intr_control |= rx_delay_interval & 964 ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; 965 966 intr_reg->intr_control |= 967 (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) 968 & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; 969 970 if (unmask) 971 intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; 972 } 973 974 static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl) 975 { 976 u16 size, buffers_num; 977 u8 *buf; 978 979 size = bounce_buf_ctrl->buffer_size; 980 buffers_num = bounce_buf_ctrl->buffers_num; 981 982 buf = bounce_buf_ctrl->base_buffer + 983 (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size; 984 985 prefetchw(bounce_buf_ctrl->base_buffer + 986 (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size); 987 988 return buf; 989 } 990 991 #endif /* !(ENA_COM) */ 992