// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)					\
do {									\
	(qinfo)->sq.head = prefix##_ATQH;				\
	(qinfo)->sq.tail = prefix##_ATQT;				\
	(qinfo)->sq.len = prefix##_ATQLEN;				\
	(qinfo)->sq.bah = prefix##_ATQBAH;				\
	(qinfo)->sq.bal = prefix##_ATQBAL;				\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;		\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;		\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;		\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;			\
	(qinfo)->rq.head = prefix##_ARQH;				\
	(qinfo)->rq.tail = prefix##_ARQT;				\
	(qinfo)->rq.len = prefix##_ARQLEN;				\
	(qinfo)->rq.bah = prefix##_ARQBAH;				\
	(qinfo)->rq.bal = prefix##_ARQBAL;				\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;		\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;		\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;		\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;			\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - configure base address and length registers for a ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of entries in the ring
 *
 * Writes the ring base address and length, then reads back one register to
 * verify the configuration was latched by hardware.
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
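
/*
 * Lifecycle sketch (illustrative only, based on the kernel-doc comments in
 * this file): how the create/init/shutdown/destroy entry points are expected
 * to pair up over a PF's lifetime. The probe/reset/rebuild/remove labels are
 * descriptive, not function names.
 *
 *	probe:    ice_create_all_ctrlq(hw)    locks created, queues brought up
 *	reset:    ice_shutdown_all_ctrlq(hw)  queues torn down, locks kept
 *	rebuild:  ice_init_all_ctrlq(hw)      queues brought up again, locks reused
 *	remove:   ice_destroy_all_ctrlq(hw)   queues torn down, locks destroyed
 */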
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
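
/*
 * Usage sketch (illustrative only; not compiled as part of this file): a
 * caller typically pairs ice_fill_dflt_direct_cmd_desc() with
 * ice_sq_send_cmd() on the admin queue. The opcode ice_aqc_opc_get_ver and
 * the params.get_ver layout are assumed to come from ice_adminq_cmd.h.
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * On success the firmware writeback lands in desc, so a direct command's
 * return data can then be read from desc.params (here, desc.params.get_ver).
 */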
/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
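
/*
 * Receive-side sketch (illustrative only; not compiled as part of this file):
 * a service task typically drains a control queue by calling
 * ice_clean_rq_elem() until it reports an error such as ICE_ERR_AQ_NO_WORK or
 * the pending count drops to zero. kzalloc()/kfree() are the ordinary kernel
 * allocators, and handle_event() stands in for a hypothetical per-opcode
 * dispatcher; the event buffer is sized to match what this file pre-posts on
 * the ARQ.
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		handle_event(hw, &event);
 *	} while (pending);
 *	kfree(event.msg_buf);
 */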