// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)					\
do {									\
	(qinfo)->sq.head = prefix##_ATQH;				\
	(qinfo)->sq.tail = prefix##_ATQT;				\
	(qinfo)->sq.len = prefix##_ATQLEN;				\
	(qinfo)->sq.bah = prefix##_ATQBAH;				\
	(qinfo)->sq.bal = prefix##_ATQBAL;				\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;		\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;		\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;		\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;			\
	(qinfo)->rq.head = prefix##_ARQH;				\
	(qinfo)->rq.tail = prefix##_ARQT;				\
	(qinfo)->rq.len = prefix##_ARQLEN;				\
	(qinfo)->rq.bah = prefix##_ARQBAH;				\
	(qinfo)->rq.bal = prefix##_ARQBAL;				\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;		\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;		\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;		\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;			\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}
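
/* Both control queues share the same register layout; only the register
 * prefix differs (PF_FW_* for the AdminQ, PF_MBX_* for the PF-VF Mailbox),
 * which is why a single ICE_CQ_INIT_REGS() macro can program either one.
 */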

/**
 * ice_check_sq_alive - check if the Send Queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}
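
/* Illustration (not part of the driver logic): the length register holds the
 * programmed entry count in its LEN field plus an enable bit, so a live queue
 * configured with, say, 64 entries reads back as (64 | len_ena_mask).  Any
 * other value means the queue was never enabled or has since been reset.
 */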

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}
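
/* Unlike the send queue, every receive descriptor above is pre-posted: it
 * already points at a DMA buffer that firmware can write an event into.
 * The buffers are handed over to firmware when ice_cfg_rq_regs() bumps the
 * ARQ tail, and ice_clean_rq_elem() re-posts each one after it is consumed.
 */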

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}
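
/* Bumping the ARQ tail to the last descriptor index hands the pre-posted
 * receive buffers to firmware.  Firmware advances the head register as it
 * writes events; ice_clean_rq_elem() later re-posts buffers by moving the
 * tail along behind it.
 */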

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi)					\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}
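
/* Note that cq->sq.count (and cq->rq.count below) is only set on success, so
 * a queue that failed to initialize still reads as uninitialized by the
 * shutdown and send paths.
 */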

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}
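
/* The check above tolerates a minor-version skew of up to two in either
 * direction and merely logs the mismatch; only a firmware API major version
 * newer than the driver expects actually blocks the load.
 */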

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}
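
/* For the admin queue, ice_aq_q_shutdown() above tells firmware that the
 * driver is unloading before the queue registers are torn down; it is only
 * attempted while the send queue still reads as alive.
 */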

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
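
/* Rough usage sketch (values are illustrative, not mandated by this file):
 * the probe path is expected to size both queues before creating them, e.g.
 *
 *	hw->adminq.num_sq_entries = 64;
 *	hw->adminq.num_rq_entries = 64;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *	... same for hw->mailboxq ...
 *	ice_create_all_ctrlq(hw);
 */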

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
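
/* Firmware advances the ATQ head register as it consumes commands, so every
 * descriptor between next_to_clean and head has been processed and can be
 * zeroed and reused; ICE_CTL_Q_DESC_UNUSED() then reports the free slots.
 */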

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
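
/* Typical call pattern (illustrative only; the opcode and buffer are whatever
 * the specific AQ command requires):
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
 *	// ...fill desc.params for the command, if any...
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, buf, buf_size, NULL);
 *
 * On success the descriptor is overwritten with the firmware writeback and
 * any response data is copied back into buf.
 */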

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode),
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
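
/* The pending count above is the ring distance from next_to_clean to the
 * hardware head, adding rq.count when the head has wrapped around; callers
 * typically keep calling ice_clean_rq_elem() until it reports no work left.
 */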