/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32


#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS	5

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
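
/*
 * Example: with ena_dev->dma_addr_bits == 48, an address such as
 * 0x0000_1234_5678_9abc passes the range check above and is split into
 * mem_addr_low = 0x5678_9abc and mem_addr_high = 0x1234 before being
 * handed to the device.
 */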

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					 GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					 GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}
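
/*
 * Note on the admin submission path above: each command is tagged with
 * curr_cmd_id, which also selects the comp_ctx slot that the completion
 * handler will look up later. The phase bit in the descriptor flags is
 * flipped every time sq.tail wraps around q_depth, and the doorbell write
 * of sq.tail is what hands the new descriptor to the device.
 */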

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_sq->desc_addr.phys_addr,
					   GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_alloc_coherent(ena_dev->dmadev, size,
						   &io_sq->desc_addr.phys_addr,
						   GFP_KERNEL);
		}

		if (!io_sq->desc_addr.virt_addr) {
			pr_err("memory allocation failed\n");
			return -ENOMEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->bounce_buf_ctrl.base_buffer =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			pr_err("bounce buffer memory allocation failed\n");
			return -ENOMEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
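
/*
 * Placement-policy note for the allocations above: with
 * ENA_ADMIN_PLACEMENT_POLICY_HOST the descriptor ring lives in host DMA
 * memory, while with ENA_ADMIN_PLACEMENT_POLICY_DEV (LLQ) descriptors are
 * staged in one of ENA_COM_BOUNCE_BUFFER_CNTRL_CNT host bounce buffers of
 * desc_list_entry_size bytes each before being copied to the device memory
 * BAR. In both cases the first allocation attempt is made on the queue's
 * NUMA node and falls back to the default node on failure.
 */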

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, size,
				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_cq->cdesc_addr.phys_addr,
					   GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	unsigned long timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(ENA_POLL_MS);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/**
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which, in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}
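
/*
 * Negotiation note for ena_com_config_llq_info() below: for each LLQ knob
 * (header location, stride control, entry size, descriptors before header)
 * the driver first tries the value requested in llq_default_cfg; if the
 * device's supported-features bitmap does not include it, a supported value
 * is chosen instead and a fallback message is printed. Only a completely
 * empty supported set for a knob is treated as a fatal -EINVAL.
 */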

static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		pr_err("Invalid header location control, supported: 0x%x\n",
		       supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
				       supported_feat);
				return -EINVAL;
			}

			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			       llq_default_cfg->llq_stride_ctrl, supported_feat,
			       llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_ring_entry_size, supported_feat,
		       llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8
		 * This requirement comes from __iowrite64_copy()
		 */
		pr_err("illegal entry size %d\n",
		       llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_num_decs_before_header,
		       supported_feat, llq_info->descs_num_before_header);
	}

	llq_info->max_entries_in_tx_burst =
		(u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		pr_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the driver didn't receive any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
			       comp_ctx->cmd_opcode,
			       admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continuing normal execution in such case
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = -ETIME;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads the hardware device register through posting writes
 * and waiting for a response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
		ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
		ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		msleep(ENA_POLL_MS);
	}

	return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				   &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
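
/*
 * Worked example for the rescaling below: intervals are stored in units of
 * the device's interrupt delay resolution. If the Rx interval was 64 units
 * at a previous resolution of 1 (i.e. 64 usec) and the device now reports a
 * resolution of 4, the stored interval becomes 64 * 1 / 4 = 16 units, which
 * still represents the same 64 usec delay.
 */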

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	/* Initial value of intr_delay_resolution might be 0 */
	u16 prev_intr_delay_resolution =
		ena_dev->intr_delay_resolution ?
		ena_dev->intr_delay_resolution :
		ENA_DEFAULT_INTR_DELAY_RESOLUTION;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	/* update Rx */
	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}
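
/*
 * Usage note: callers throughout this file build a specific ena_admin_*
 * command on the stack, cast it and its response struct to the generic
 * ena_admin_aq_entry/ena_admin_acq_entry types, and rely on
 * ena_com_execute_admin_command() to block until the device completes the
 * command (by interrupt or by polling) and to translate the completion
 * status into a standard errno value.
 */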

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(ENA_POLL_MS);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
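
/*
 * Version-check note: ena_com_validate_version() below masks out the
 * implementation ID and compares the remaining major/minor/sub-minor fields
 * against MIN_ENA_CTRL_VER, which is built from
 * ENA_CTRL_MAJOR/MINOR/SUB_MINOR (0.0.1) using the same register field
 * shifts, so any controller reporting at least version 0.0.1 is accepted.
 */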

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the version the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(ena_dev->dmadev,
				   sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:

	return -ENOMEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
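
/*
 * Flow note for ena_com_create_io_queue() below: the SQ and CQ descriptor
 * memory is allocated first (ena_com_init_io_sq/cq), then the CQ is created
 * on the device so that its index can be passed to the SQ creation command.
 * If creating the SQ fails, the already-created CQ is destroyed and the
 * host-side memory is freed.
 */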
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
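
/*
 * Illustrative note (not part of the original source): callers describe each
 * IO queue pair through struct ena_com_create_io_ctx and tear it down with
 * ena_com_destroy_io_queue() on the same qid. A minimal TX queue sketch with
 * made-up qid/size/vector values:
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type = ena_dev->tx_mem_queue_type,
 *		.msix_vector = 1,
 *		.qid = 0,
 *		.queue_size = 1024,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 */
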
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version !=
		    ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return -EINVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		if (rc)
			return rc;

		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so if the command
	 * isn't supported set the driver hints to 0.
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}
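
/*
 * Illustrative note (not part of the original source): supported_features is
 * a per-feature bitmap, which is what ena_com_check_supported_feature_id()
 * consults before issuing optional commands. HW_HINTS and LLQ above are
 * treated as optional: -EOPNOTSUPP is absorbed and the corresponding context
 * is zeroed instead of failing the whole query. A caller-side check could
 * look like (configure_rss() is a hypothetical helper):
 *
 *	if (ena_dev->supported_features & BIT(ENA_ADMIN_RSS_HASH_FUNCTION))
 *		configure_rss(ena_dev);
 */
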
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	unsigned long long timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp =
			(unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom, timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head,
		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
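
/*
 * Illustrative note (not part of the original source): the reset timeout and
 * the admin completion timeout both come from the capabilities register in
 * coarse units. For example, if the ADMIN_CMD_TO field reads 5, the admin
 * completion timeout becomes 5 * 100000 us = 500 ms; if the field is zero the
 * code falls back to ADMIN_CMD_TIMEOUT_US (3 s). wait_for_reset_state() is
 * the polling helper defined earlier in this file.
 */
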
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
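
/*
 * Illustrative note (not part of the original source): the feature setters in
 * the rest of this file follow the pattern used by ena_com_set_dev_mtu()
 * above: build an ena_admin_set_feat_cmd with opcode ENA_ADMIN_SET_FEATURE
 * and the feature_id, optionally point control_buffer.address at a
 * DMA-coherent buffer for indirect payloads, and push it through
 * ena_com_execute_admin_command(). Only the cmd.u.<feature> member and the
 * control buffer differ between features.
 */
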
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a multiple of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
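
/*
 * Illustrative note (not part of the original source): the Toeplitz key is
 * stored as whole DWORDs, which is why key_len must be a multiple of 4 and
 * keys_num is key_len >> 2. For instance, a common 40-byte Toeplitz key would
 * be stored as keys_num = 10, and ena_com_get_hash_function() copies
 * keys_num << 2 = 40 bytes back out.
 */
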
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
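
/*
 * Illustrative note (not part of the original source): the host-side
 * indirection table has 1 << tbl_log_size entries, each holding a queue
 * index. A caller would typically spread the entries round-robin over its RX
 * queues before pushing the table to the device; tbl_size and num_rx_queues
 * below are hypothetical caller variables:
 *
 *	for (i = 0; i < tbl_size; i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i,
 *						  i % num_rx_queues);
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
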
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
				   &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	host_attr->host_info->ena_spec_version =
		((ENA_COMMON_SPEC_VERSION_MAJOR <<
		  ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		 (ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}
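
/*
 * Illustrative note (not part of the original source): the host info page is
 * a fixed 4 KiB DMA-coherent buffer that the driver fills with OS/driver
 * identification before calling ena_com_set_host_attributes() below.
 * ena_spec_version packs the common spec major/minor the same way the device
 * version register does: a major/minor pair (M, m) is encoded as
 * (M << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | m.
 */
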
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
				   &host_attr->debug_area_dma_addr,
				   GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
							   u32 intr_delay_resolution,
							   u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}
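
/*
 * Illustrative note (not part of the original source): non-adaptive
 * moderation intervals are stored in device units of intr_delay_resolution
 * microseconds (read from the device in ena_com_init_interrupt_moderation()
 * below). For example, with a 4 us resolution a request for 64 us of TX
 * coalescing becomes an interval value of 64 / 4 = 16:
 *
 *	ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 64);
 */
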
rc: %d\n", 2818 rc); 2819 } 2820 2821 /* no moderation supported, disable adaptive support */ 2822 ena_com_disable_adaptive_moderation(ena_dev); 2823 return rc; 2824 } 2825 2826 /* if moderation is supported by device we set adaptive moderation */ 2827 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; 2828 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); 2829 2830 /* Disable adaptive moderation by default - can be enabled later */ 2831 ena_com_disable_adaptive_moderation(ena_dev); 2832 2833 return 0; 2834 } 2835 2836 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) 2837 { 2838 return ena_dev->intr_moder_tx_interval; 2839 } 2840 2841 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) 2842 { 2843 return ena_dev->intr_moder_rx_interval; 2844 } 2845 2846 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, 2847 struct ena_admin_feature_llq_desc *llq_features, 2848 struct ena_llq_configurations *llq_default_cfg) 2849 { 2850 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 2851 int rc; 2852 2853 if (!llq_features->max_llq_num) { 2854 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2855 return 0; 2856 } 2857 2858 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); 2859 if (rc) 2860 return rc; 2861 2862 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - 2863 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); 2864 2865 if (unlikely(ena_dev->tx_max_header_size == 0)) { 2866 pr_err("the size of the LLQ entry is smaller than needed\n"); 2867 return -EINVAL; 2868 } 2869 2870 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; 2871 2872 return 0; 2873 } 2874